xref: /illumos-gate/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c (revision 3c112a2b34403220c06c3e2fcac403358cfba168)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 /*
26  * ZFS syseventd module.
27  *
28  * The purpose of this module is to identify when devices are added to the
29  * system, and appropriately online or replace the affected vdevs.
30  *
31  * When a device is added to the system:
32  *
33  * 	1. Search for any vdevs whose devid matches that of the newly added
34  *	   device.
35  *
36  * 	2. If no vdevs are found, then search for any vdevs whose devfs path
37  *	   matches that of the new device.
38  *
39  *	3. If no vdevs match by either method, then ignore the event.
40  *
41  * 	4. Attempt to online the device with a flag to indicate that it should
42  *	   be unspared when resilvering completes.  If this succeeds, then the
43  *	   same device was inserted and we should continue normally.
44  *
45  *	5. If the pool does not have the 'autoreplace' property set, attempt to
46  *	   online the device again without the unspare flag, which will
47  *	   generate an FMA fault.
48  *
49  *	6. If the pool has the 'autoreplace' property set, and the matching vdev
50  *	   is a whole disk, then label the new disk and attempt a 'zpool
51  *	   replace'.
52  *
53  * The module responds to EC_DEV_ADD events for both disks and lofi devices,
54  * with the latter used for testing.  The special ESC_ZFS_VDEV_CHECK event
55  * indicates that a device failed to open during pool load, but the autoreplace
56  * property was set.  In this case, we deferred the associated FMA fault until
57  * our module had a chance to process the autoreplace logic.  If the device
58  * could not be replaced, then the second online attempt will trigger the FMA
59  * fault that we skipped earlier.
60  */
61 
62 #include <alloca.h>
63 #include <devid.h>
64 #include <fcntl.h>
65 #include <libnvpair.h>
66 #include <libsysevent.h>
67 #include <libzfs.h>
68 #include <limits.h>
69 #include <stdlib.h>
70 #include <string.h>
71 #include <syslog.h>
72 #include <sys/list.h>
73 #include <sys/sunddi.h>
74 #include <sys/sysevent/eventdefs.h>
75 #include <sys/sysevent/dev.h>
76 #include <thread_pool.h>
77 #include <unistd.h>
78 #include "syseventd.h"
79 
80 #if defined(__i386) || defined(__amd64)
81 #define	PHYS_PATH	":q"
82 #define	RAW_SLICE	"p0"
83 #elif defined(__sparc)
84 #define	PHYS_PATH	":c"
85 #define	RAW_SLICE	"s2"
86 #else
87 #error Unknown architecture
88 #endif
89 
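/*
 * Callback applied to each (pool, vdev) pair that matches the search criteria
 * below.  The boolean indicates whether the matched device is a physical disk
 * (as opposed to a lofi device), which controls whether labeling is attempted.
 */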
90 typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
91 
92 libzfs_handle_t *g_zfshdl;
93 list_t g_pool_list;
94 tpool_t *g_tpool;
95 
96 typedef struct unavailpool {
97 	zpool_handle_t	*uap_zhp;
98 	list_node_t	uap_node;
99 } unavailpool_t;
100 
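/*
 * Return the state of the pool's top-level (root) vdev, taken from the vdev
 * stats in the current pool configuration.
 */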
101 int
102 zfs_toplevel_state(zpool_handle_t *zhp)
103 {
104 	nvlist_t *nvroot;
105 	vdev_stat_t *vs;
106 	unsigned int c;
107 
108 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
109 	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
110 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
111 	    (uint64_t **)&vs, &c) == 0);
112 	return (vs->vs_state);
113 }
114 
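/*
 * zpool_iter() callback used at module initialization: pools whose top-level
 * vdev state is below DEGRADED (i.e. currently unavailable) are kept open and
 * added to the list passed in 'data'; all other pool handles are closed.
 */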
115 static int
116 zfs_unavail_pool(zpool_handle_t *zhp, void *data)
117 {
118 	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
119 		unavailpool_t *uap;
120 		uap = malloc(sizeof (unavailpool_t));
121 		uap->uap_zhp = zhp;
122 		list_insert_tail((list_t *)data, uap);
123 	} else {
124 		zpool_close(zhp);
125 	}
126 	return (0);
127 }
128 
129 /*
130  * The device associated with the given vdev (either by devid or physical path)
131  * has been added to the system.  If 'isdisk' is set, then we only attempt a
132  * replacement if it's a whole disk.  This also implies that we should label the
133  * disk first.
134  *
135  * First, we attempt to online the device (making sure to undo any spare
136  * operation when finished).  If this succeeds, then we're done.  If it fails,
137  * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
138  * but that the label was not what we expected.  If the 'autoreplace' property
139  * is set, then we relabel the disk (if specified), and attempt a 'zpool
140  * replace'.  If the online is successful, but the new state is something else
141  * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
142  * race, and we should avoid attempting to relabel the disk.
143  */
144 static void
145 zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
146 {
147 	char *path;
148 	vdev_state_t newstate;
149 	nvlist_t *nvroot, *newvd;
150 	uint64_t wholedisk = 0ULL;
151 	char *physpath = NULL;
152 	char rawpath[PATH_MAX], fullpath[PATH_MAX];
153 	size_t len;
154 
155 	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
156 		return;
157 
158 	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
159 	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
160 
161 	/*
162 	 * We should have a way to online a device by guid.  With the current
163 	 * interface, we are forced to chop off the 's0' for whole disks.
164 	 */
165 	(void) strlcpy(fullpath, path, sizeof (fullpath));
166 	if (wholedisk)
167 		fullpath[strlen(fullpath) - 2] = '\0';
168 
169 	/*
170 	 * Attempt to online the device.  It would be nice to online this by
171 	 * GUID, but the current interface only supports lookup by path.
172 	 */
173 	if (zpool_vdev_online(zhp, fullpath,
174 	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
175 	    (newstate == VDEV_STATE_HEALTHY || newstate == VDEV_STATE_DEGRADED))
176 		return;
177 
178 	/*
179 	 * If the pool doesn't have the autoreplace property set, or this is a
180 	 * disk event but the vdev is not a whole disk, then attempt a true
181 	 * online (without the unspare flag), which will trigger an FMA fault.
182 	 */
183 	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
184 	    (isdisk && !wholedisk)) {
185 		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
186 		    &newstate);
187 		return;
188 	}
189 
190 	if (isdisk) {
191 		/*
192 		 * If this is a request to label a whole disk, then attempt to
193 		 * write out the label.  Before we can label the disk, we need
194 		 * access to a raw node.  Ideally, we'd like to walk the devinfo
195 		 * tree and find a raw node from the corresponding parent node.
196 		 * This is overly complicated, and since we know how we labeled
197 		 * this device in the first place, we know it's safe to switch
198 		 * from /dev/dsk to /dev/rdsk and append the backup slice.
199 		 *
200 		 * If any part of this process fails, then do a force online to
201 		 * trigger a ZFS fault for the device (and any hot spare
202 		 * replacement).
203 		 */
204 		if (strncmp(path, "/dev/dsk/", 9) != 0) {
205 			(void) zpool_vdev_online(zhp, fullpath,
206 			    ZFS_ONLINE_FORCEFAULT, &newstate);
207 			return;
208 		}
209 
210 		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
211 		len = strlen(rawpath);
212 		rawpath[len - 2] = '\0';
213 
214 		if (zpool_label_disk(g_zfshdl, zhp, rawpath) != 0) {
215 			(void) zpool_vdev_online(zhp, fullpath,
216 			    ZFS_ONLINE_FORCEFAULT, &newstate);
217 			return;
218 		}
219 	}
220 
221 	/*
222 	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
223 	 * the entire vdev structure is harmless, we construct a reduced set of
224 	 * path/physpath/wholedisk to keep it simple.
225 	 */
226 	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
227 		return;
228 
229 	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
230 		nvlist_free(nvroot);
231 		return;
232 	}
233 
234 	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
235 	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
236 	    (physpath != NULL && nvlist_add_string(newvd,
237 	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
238 	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
239 	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
240 	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
241 	    1) != 0) {
242 		nvlist_free(newvd);
243 		nvlist_free(nvroot);
244 		return;
245 	}
246 
247 	nvlist_free(newvd);
248 
249 	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);
250 
251 	nvlist_free(nvroot);
252 
254 
255 /*
256  * Utility functions to find a vdev matching given criteria.
257  */
258 typedef struct dev_data {
259 	const char		*dd_compare;
260 	const char		*dd_prop;
261 	zfs_process_func_t	dd_func;
262 	boolean_t		dd_found;
263 	boolean_t		dd_isdisk;
264 	uint64_t		dd_pool_guid;
265 	uint64_t		dd_vdev_guid;
266 } dev_data_t;
267 
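/*
 * Recursively walk a vdev tree.  For each leaf vdev, invoke dd_func if the
 * vdev guid matches dd_vdev_guid, or if the property named by dd_prop (devid
 * or physical path) matches dd_compare exactly or by the prefix rules
 * described below.
 */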
268 static void
269 zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
270 {
271 	dev_data_t *dp = data;
272 	char *path;
273 	uint_t c, children;
274 	nvlist_t **child;
275 	size_t len;
276 	uint64_t guid;
277 
278 	/*
279 	 * First iterate over any children.
280 	 */
281 	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
282 	    &child, &children) == 0) {
283 		for (c = 0; c < children; c++)
284 			zfs_iter_vdev(zhp, child[c], data);
285 		return;
286 	}
287 
288 	if (dp->dd_vdev_guid != 0) {
289 		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
290 		    &guid) != 0 || guid != dp->dd_vdev_guid)
291 			return;
292 	} else {
293 		len = strlen(dp->dd_compare);
294 
295 		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
296 		    strncmp(dp->dd_compare, path, len) != 0)
297 			return;
298 
299 		/*
300 		 * Normally, we want to have an exact match for the comparison
301 		 * string.  However, we allow substring matches in the following
302 		 * cases:
303 		 *
304 		 * 	<path>:		This is a devpath, and the target is one
305 		 * 			of its children.
306 		 *
307 		 * 	<path/>		This is a devid for a whole disk, and
308 		 * 			the target is one of its children.
309 		 */
310 		if (path[len] != '\0' && path[len] != ':' &&
311 		    path[len - 1] != '/')
312 			return;
313 	}
314 
315 	(dp->dd_func)(zhp, nvl, dp->dd_isdisk);
316 }
317 
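/*
 * Thread pool task: mount and share the datasets of a pool that has become
 * available again, then release the pool handle and list entry.
 */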
318 void
319 zfs_enable_ds(void *arg)
320 {
321 	unavailpool_t *pool = (unavailpool_t *)arg;
322 
323 	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
324 	zpool_close(pool->uap_zhp);
325 	free(pool);
326 }
327 
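/*
 * zpool_iter() callback for device add/check events.  If the pool matches the
 * requested pool guid (or no guid was given), walk its vdev tree looking for
 * matching vdevs.  Additionally, if the pool was previously unavailable and
 * its top-level vdev is now DEGRADED or better, dispatch zfs_enable_ds() on
 * the thread pool to mount and share its datasets.
 */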
328 static int
329 zfs_iter_pool(zpool_handle_t *zhp, void *data)
330 {
331 	nvlist_t *config, *nvl;
332 	dev_data_t *dp = data;
333 	uint64_t pool_guid;
334 	unavailpool_t *pool;
335 
336 	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
337 		if (dp->dd_pool_guid == 0 ||
338 		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
339 		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
340 			(void) nvlist_lookup_nvlist(config,
341 			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
342 			zfs_iter_vdev(zhp, nvl, data);
343 		}
344 	}
345 	for (pool = list_head(&g_pool_list); pool != NULL;
346 	    pool = list_next(&g_pool_list, pool)) {
347 
348 		if (strcmp(zpool_get_name(zhp),
349 		    zpool_get_name(pool->uap_zhp)))
350 			continue;
351 		if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
352 			list_remove(&g_pool_list, pool);
353 			(void) tpool_dispatch(g_tpool, zfs_enable_ds, pool);
354 			break;
355 		}
356 	}
357 
358 	zpool_close(zhp);
359 	return (0);
360 }
361 
362 /*
363  * Given a physical device path, iterate over all (pool, vdev) pairs which
364  * correspond to the given path.
365  */
366 static boolean_t
367 devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
368 {
369 	dev_data_t data = { 0 };
370 
371 	data.dd_compare = devpath;
372 	data.dd_func = func;
373 	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
374 	data.dd_found = B_FALSE;
375 	data.dd_isdisk = wholedisk;
376 
377 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
378 
379 	return (data.dd_found);
380 }
381 
382 /*
383  * Given a /devices path, lookup the corresponding devid for each minor node,
384  * and find any vdevs with matching devids.  Doing this straight up would be
385  * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
386  * the fact that each devid ends with "/<minornode>".  Once we find any valid
387  * minor node, we chop off the portion after the last slash, and then search for
388  * matching vdevs, which is O(vdevs in system).
389  */
390 static boolean_t
391 devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
392 {
393 	size_t len = strlen(devpath) + sizeof ("/devices") +
394 	    sizeof (PHYS_PATH) - 1;
395 	char *fullpath;
396 	int fd;
397 	ddi_devid_t devid;
398 	char *devidstr, *fulldevid;
399 	dev_data_t data = { 0 };
400 
401 	/*
402 	 * Try to open a known minor node.
403 	 */
404 	fullpath = alloca(len);
405 	(void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
406 	if ((fd = open(fullpath, O_RDONLY)) < 0)
407 		return (B_FALSE);
408 
409 	/*
410 	 * Determine the devid as a string, without the minor name portion; a
411 	 * trailing '/' is appended below so vdev devids can be prefix-matched.
412 	 */
413 	if (devid_get(fd, &devid) != 0) {
414 		(void) close(fd);
415 		return (B_FALSE);
416 	}
417 	(void) close(fd);
418 
419 	if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
420 		devid_free(devid);
421 		return (B_FALSE);
422 	}
423 
424 	len = strlen(devidstr) + 2;
425 	fulldevid = alloca(len);
426 	(void) snprintf(fulldevid, len, "%s/", devidstr);
427 
428 	data.dd_compare = fulldevid;
429 	data.dd_func = func;
430 	data.dd_prop = ZPOOL_CONFIG_DEVID;
431 	data.dd_found = B_FALSE;
432 	data.dd_isdisk = wholedisk;
433 
434 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
435 
436 	devid_str_free(devidstr);
437 	devid_free(devid);
438 
439 	return (data.dd_found);
440 }
441 
442 /*
443  * This function is called when we receive a devfs add event.  This can be
444  * either a disk event or a lofi event, and the behavior is slightly different
445  * depending on which it is.
446  */
447 static int
448 zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
449 {
450 	char *devpath, *devname;
451 	char path[PATH_MAX], realpath[PATH_MAX];
452 	char *colon, *raw;
453 	int ret;
454 
455 	/*
456 	 * The main unit of operation is the physical device path.  For disks,
457 	 * this is the device node, as all minor nodes are affected.  For lofi
458 	 * devices, this includes the minor path.  Unfortunately, this isn't
459 	 * represented in the DEV_PHYS_PATH for various reasons.
460 	 */
461 	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
462 		return (-1);
463 
464 	/*
465 	 * If this is a lofi device, then also get the minor instance name.
466 	 * Unfortunately, the current payload doesn't include an easy way to get
467 	 * this information.  So we cheat by resolving the 'dev_name' (which
468 	 * refers to the raw device) and taking the portion between ':' and ',raw'.
469 	 */
470 	(void) strlcpy(realpath, devpath, sizeof (realpath));
471 	if (is_lofi) {
472 		if (nvlist_lookup_string(nvl, DEV_NAME,
473 		    &devname) == 0 &&
474 		    (ret = resolvepath(devname, path,
475 		    sizeof (path))) > 0) {
476 			path[ret] = '\0';
477 			colon = strchr(path, ':');
478 			if (colon != NULL)
479 				raw = strstr(colon + 1, ",raw");
480 			if (colon != NULL && raw != NULL) {
481 				*raw = '\0';
482 				(void) snprintf(realpath,
483 				    sizeof (realpath), "%s%s",
484 				    devpath, colon);
485 				*raw = ',';
486 			}
487 		}
488 	}
489 
490 	/*
491 	 * Iterate over all vdevs with a matching devid, and then those with a
492 	 * matching /devices path.  For disks, we only want to pay attention to
493 	 * vdevs marked as whole disks.  For lofi, we don't care (because we're
494 	 * matching an exact minor name).
495 	 */
496 	if (!devid_iter(realpath, zfs_process_add, !is_lofi))
497 		(void) devpath_iter(realpath, zfs_process_add, !is_lofi);
498 
499 	return (0);
500 }
501 
502 /*
503  * Called when we receive a VDEV_CHECK event, which indicates a device could not
504  * be opened during initial pool open, but the autoreplace property was set on
505  * the pool.  In this case, we treat it as if it were an add event.
506  */
507 static int
508 zfs_deliver_check(nvlist_t *nvl)
509 {
510 	dev_data_t data = { 0 };
511 
512 	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
513 	    &data.dd_pool_guid) != 0 ||
514 	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
515 	    &data.dd_vdev_guid) != 0)
516 		return (0);
517 
518 	data.dd_isdisk = B_TRUE;
519 	data.dd_func = zfs_process_add;
520 
521 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
522 
523 	return (0);
524 }
525 
526 #define	DEVICE_PREFIX	"/devices"
527 
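/*
 * zpool_iter() callback for ESC_DEV_DLE events.  If the given physical path
 * corresponds to a vdev in this pool and the 'autoexpand' property is set,
 * re-online the device so that any newly available capacity is picked up.
 * Returns 1 (stopping the iteration) when the device belongs to this pool.
 */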
528 static int
529 zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
530 {
531 	char *devname = data;
532 	boolean_t avail_spare, l2cache;
533 	vdev_state_t newstate;
534 	nvlist_t *tgt;
535 
536 	syseventd_print(9, "zfsdle_vdev_online: searching for %s in pool %s\n",
537 	    devname, zpool_get_name(zhp));
538 
539 	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
540 	    &avail_spare, &l2cache, NULL)) != NULL) {
541 		char *path, fullpath[MAXPATHLEN];
542 		uint64_t wholedisk = 0ULL;
543 
544 		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
545 		    &path) == 0);
546 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
547 		    &wholedisk) == 0);
548 
549 		(void) strlcpy(fullpath, path, sizeof (fullpath));
550 		if (wholedisk)
551 			fullpath[strlen(fullpath) - 2] = '\0';
552 
553 		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
554 			syseventd_print(9, "zfsdle_vdev_online: setting device"
555 			    " %s to ONLINE state in pool %s.\n",
556 			    fullpath, zpool_get_name(zhp));
557 			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL)
558 				(void) zpool_vdev_online(zhp, fullpath, 0,
559 				    &newstate);
560 		}
561 		zpool_close(zhp);
562 		return (1);
563 	}
564 	zpool_close(zhp);
565 	return (0);
566 }
567 
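/*
 * Handle an ESC_DEV_DLE event: extract the physical path from the payload,
 * strip the leading "/devices" prefix, and search all pools for a vdev with
 * that physical path so it can be re-onlined (see zfsdle_vdev_online()).
 * Returns 0 if the device was found, 1 if not, and -1 on a malformed event.
 */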
568 int
569 zfs_deliver_dle(nvlist_t *nvl)
570 {
571 	char *devname;
572 	if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
573 		syseventd_print(9, "zfs_deliver_event: no physpath\n");
574 		return (-1);
575 	}
576 	if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
577 		syseventd_print(9, "zfs_deliver_event: invalid "
578 		    "device '%s'\n", devname);
579 		return (-1);
580 	}
581 
582 	/*
583 	 * We try to find the device using the physical
584 	 * path that has been supplied. We need to strip off
585 	 * the /devices prefix before starting our search.
586 	 */
587 	devname += strlen(DEVICE_PREFIX);
588 	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
589 		syseventd_print(9, "zfs_deliver_event: device '%s' not"
590 		    " found\n", devname);
591 		return (1);
592 	}
593 	return (0);
594 }
595 
596 
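/*
 * syseventd entry point for event delivery.  Classify the event by class and
 * subclass (EC_DEV_ADD for disk/lofi additions, EC_ZFS/ESC_ZFS_VDEV_CHECK,
 * EC_DEV_STATUS/ESC_DEV_DLE), fetch its attribute list, and dispatch to the
 * appropriate handler above.
 */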
597 /*ARGSUSED*/
598 static int
599 zfs_deliver_event(sysevent_t *ev, int unused)
600 {
601 	const char *class = sysevent_get_class_name(ev);
602 	const char *subclass = sysevent_get_subclass_name(ev);
603 	nvlist_t *nvl;
604 	int ret;
605 	boolean_t is_lofi, is_check, is_dle = B_FALSE;
606 
607 	if (strcmp(class, EC_DEV_ADD) == 0) {
608 		/*
609 		 * We're mainly interested in disk additions, but we also listen
610 		 * for new lofi devices, to allow for simplified testing.
611 		 */
612 		if (strcmp(subclass, ESC_DISK) == 0)
613 			is_lofi = B_FALSE;
614 		else if (strcmp(subclass, ESC_LOFI) == 0)
615 			is_lofi = B_TRUE;
616 		else
617 			return (0);
618 
619 		is_check = B_FALSE;
620 	} else if (strcmp(class, EC_ZFS) == 0 &&
621 	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
622 		/*
623 		 * This event signifies that a device failed to open during pool
624 		 * load, but the 'autoreplace' property was set, so we should
625 		 * pretend it's just been added.
626 		 */
627 		is_check = B_TRUE;
628 	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
629 	    strcmp(subclass, ESC_DEV_DLE) == 0) {
630 		is_dle = B_TRUE;
631 	} else {
632 		return (0);
633 	}
634 
635 	if (sysevent_get_attr_list(ev, &nvl) != 0)
636 		return (-1);
637 
638 	if (is_dle)
639 		ret = zfs_deliver_dle(nvl);
640 	else if (is_check)
641 		ret = zfs_deliver_check(nvl);
642 	else
643 		ret = zfs_deliver_add(nvl, is_lofi);
644 
645 	nvlist_free(nvl);
646 	return (ret);
647 }
648 
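/*
 * Module ops vector handed back to syseventd: interface version, the number
 * of times delivery should be retried on failure, and the delivery callback.
 */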
649 static struct slm_mod_ops zfs_mod_ops = {
650 	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
651 };
652 
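/*
 * Module initialization: open a libzfs handle, record any pools that are
 * currently unavailable, and create a thread pool used to re-enable their
 * datasets once the missing devices show up.  Returns the ops vector, or NULL
 * if libzfs could not be initialized.
 */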
653 struct slm_mod_ops *
654 slm_init()
655 {
656 	if ((g_zfshdl = libzfs_init()) == NULL)
657 		return (NULL);
658 	/* collect a list of unavailable pools */
659 	list_create(&g_pool_list, sizeof (struct unavailpool),
660 	    offsetof(struct unavailpool, uap_node));
661 	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
662 	if (!list_is_empty(&g_pool_list))
663 		g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
664 		    0, NULL);
665 	return (&zfs_mod_ops);
666 }
667 
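/*
 * Module teardown: drain and destroy the thread pool, release any pools still
 * on the unavailable list, and close the libzfs handle.
 */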
668 void
669 slm_fini()
670 {
671 	unavailpool_t *pool;
672 
673 	if (g_tpool) {
674 		tpool_wait(g_tpool);
675 		tpool_destroy(g_tpool);
676 	}
677 	while ((pool = (list_head(&g_pool_list))) != NULL) {
678 		list_remove(&g_pool_list, pool);
679 		zpool_close(pool->uap_zhp);
680 		free(pool);
681 	}
682 	list_destroy(&g_pool_list);
683 	libzfs_fini(g_zfshdl);
684 }
685