spa_errlog.c revision ea8dc4b6d2251b437950c0056bc626b311c73c27
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation.  This is actually the union of two distinct logs: the last log,
 * and the current log.  All errors seen are logged to the current log.  When a
 * scrub completes, the last log is thrown out, the current log becomes the
 * last log, and the current log is reinitialized.  This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and it
 * will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark tuple (objset, object, level, blkid), and whose value is an
 * optional 'objset:object' human-readable string describing the data.  When an
 * error is first logged, this string will be empty, indicating that no name is
 * known.  This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path.  Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name.  Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
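/*
 * For example (illustrative values only): an error at level 0 of block 0x1a3
 * in object 0x5 of objset 0x21 is keyed as the string "21:5:0:1a3" (see
 * bookmark_to_name() below), initially with an empty value string.
 */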

#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
static uint64_t
strtonum(char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	*nptr = str;

	return (val);
}
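/*
 * For example (hypothetical input): strtonum("1a3:5", &p) returns 0x1a3 and
 * leaves p pointing at the ':' that terminated the parse.
 */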

/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}
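/*
 * For example, the bookmark (objset 0x21, object 0x5, level 0, blkid 0x1a3)
 * is rendered as "21:5:0:1a3"; name_to_bookmark() below is its inverse.
 */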

/*
 * Convert a string to a bookmark
 */
static void
name_to_bookmark(char *buf, zbookmark_t *zb)
{
	zb->zb_objset = strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}

/*
 * Log an uncorrectable error to the persistent error log.  We add it to the
 * spa's list of pending errors.  The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, zio_t *zio)
{
	zbookmark_t *zb = &zio->io_logical->io_bookmark;
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa->spa_load_state == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;
	avl_insert(tree, new, where);

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Return the number of errors currently in the error log.  This is actually
 * the sum of both the last log and the current log, since we don't know the
 * union of these logs until we reach userland.
 */
uint64_t
spa_get_errlog_size(spa_t *spa)
{
	uint64_t total = 0, count;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_scrub != 0 &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
	    &count) == 0)
		total += count;

	if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
	    &count) == 0)
		total += count;
	mutex_exit(&spa->spa_errlog_lock);

	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);

	return (total);
}

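/*
 * The helpers below fill the user buffer from the end: each bookmark is
 * copied out to slot (*count - 1) and *count is then decremented, so on
 * success *count is left as the number of unused slots at the front of the
 * buffer, and ENOMEM is returned if the buffer fills before the log is
 * exhausted.
 */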
#ifdef _KERNEL
static int
process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_t zb;

	if (obj == 0)
		return (0);

	for (zap_cursor_init(&zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {

		if (*count == 0) {
			zap_cursor_fini(&zc);
			return (ENOMEM);
		}

		name_to_bookmark(za.za_name, &zb);

		if (copyout(&zb, (char *)addr +
		    (*count - 1) * sizeof (zbookmark_t),
		    sizeof (zbookmark_t)) != 0) {
			/* release the cursor before bailing out */
			zap_cursor_fini(&zc);
			return (EFAULT);
		}

		*count -= 1;
	}

	zap_cursor_fini(&zc);

	return (0);
}

static int
process_error_list(avl_tree_t *list, void *addr, size_t *count)
{
	spa_error_entry_t *se;

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {

		if (*count == 0)
			return (ENOMEM);

		if (copyout(&se->se_bookmark, (char *)addr +
		    (*count - 1) * sizeof (zbookmark_t),
		    sizeof (zbookmark_t)) != 0)
			return (EFAULT);

		*count -= 1;
	}

	return (0);
}
#endif

/*
 * Copy all known errors to userland as an array of bookmarks.  This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists.  We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, size_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(&spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(&spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
#endif

	return (ret);
}
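/*
 * Illustrative usage (a sketch only; the real consumer is the ZFS ioctl
 * path, not shown here):
 *
 *	count = spa_get_errlog_size(spa);
 *	(allocate a userland buffer of count * sizeof (zbookmark_t) bytes)
 *	error = spa_get_errlog(spa, uaddr, &count);
 *
 * Since new errors may be logged between the two calls, ENOMEM is still
 * possible if the buffer fills up.
 */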

/*
 * Called when a scrub completes.  This simply sets a bit that tells
 * spa_log_error() which AVL tree to add new errors to.  spa_errlog_sync() is
 * responsible for actually syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);

	ASSERT(!spa->spa_scrub_finished);
	spa->spa_scrub_finished = B_TRUE;

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Discard any pending errors from the spa_t.  Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}

/*
 * Process a list of errors into the current on-disk log.
 */
static void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[64];
	void *cookie;

	if (avl_numnodes(t) != 0) {
		/* create log if necessary */
		if (*obj == 0)
			*obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE,
			    0, tx);

		/* add errors to the current log */
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			char *name = se->se_name ? se->se_name : "";

			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

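			/*
			 * The value stored is the name as strlen(name) + 1
			 * single-byte integers, i.e. the NUL-terminated
			 * string itself (empty until a name is known).
			 */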
			(void) zap_update(spa->spa_meta_objset,
			    *obj, buf, 1, strlen(name) + 1, name, tx);
		}

		/* purge the error list */
		cookie = NULL;
		while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
			kmem_free(se, sizeof (spa_error_entry_t));
	}
}

/*
 * Sync the error log out to disk.  This is a little tricky because the act of
 * writing the error log can itself generate errors, which must be logged under
 * the spa_errlist_lock.  So, we lock the error lists, take a copy of the
 * lists, and then reinitialize them.  Then, we drop the error list lock and
 * take the error log lock, at which point we do the errlog processing.  If we
 * encounter an I/O error during this process, we can still successfully add
 * the error to the in-core list.  Note that this will result in the perpetual
 * recycling of such errors, but it is an unlikely situation and not a
 * performance-critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			VERIFY(dmu_object_free(spa->spa_meta_objset,
			    spa->spa_errlog_last, tx) == 0);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
}