13d7072f8Seschrock /*
23d7072f8Seschrock * CDDL HEADER START
33d7072f8Seschrock *
43d7072f8Seschrock * The contents of this file are subject to the terms of the
53d7072f8Seschrock * Common Development and Distribution License (the "License").
63d7072f8Seschrock * You may not use this file except in compliance with the License.
73d7072f8Seschrock *
83d7072f8Seschrock * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
93d7072f8Seschrock * or http://www.opensolaris.org/os/licensing.
103d7072f8Seschrock * See the License for the specific language governing permissions
113d7072f8Seschrock * and limitations under the License.
123d7072f8Seschrock *
133d7072f8Seschrock * When distributing Covered Code, include this CDDL HEADER in each
143d7072f8Seschrock * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
153d7072f8Seschrock * If applicable, add the following below this CDDL HEADER, with the
163d7072f8Seschrock * fields enclosed by brackets "[]" replaced with your own identifying
173d7072f8Seschrock * information: Portions Copyright [yyyy] [name of copyright owner]
183d7072f8Seschrock *
193d7072f8Seschrock * CDDL HEADER END
203d7072f8Seschrock */
213d7072f8Seschrock /*
22b98131cfSEric Taylor * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
234263d13fSGeorge Wilson * Copyright (c) 2012 by Delphix. All rights reserved.
24cead1df3SHans Rosenfeld * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
256f0e4dc9SAndy Fiddaman * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
26*b4fb0039SJoshua M. Clulow * Copyright 2022 Oxide Computer Company
273d7072f8Seschrock */
283d7072f8Seschrock
293d7072f8Seschrock /*
303d7072f8Seschrock * ZFS syseventd module.
313d7072f8Seschrock *
323d7072f8Seschrock * The purpose of this module is to identify when devices are added to the
333d7072f8Seschrock * system, and appropriately online or replace the affected vdevs.
343d7072f8Seschrock *
353d7072f8Seschrock * When a device is added to the system:
363d7072f8Seschrock *
376f0e4dc9SAndy Fiddaman * 1. Search for any vdevs whose devid matches that of the newly added
383d7072f8Seschrock * device.
393d7072f8Seschrock *
406f0e4dc9SAndy Fiddaman * 2. If no vdevs are found, then search for any vdevs whose devfs path
413d7072f8Seschrock * matches that of the new device.
423d7072f8Seschrock *
433d7072f8Seschrock * 3. If no vdevs match by either method, then ignore the event.
443d7072f8Seschrock *
456f0e4dc9SAndy Fiddaman * 4. Attempt to online the device with a flag to indicate that it should
463d7072f8Seschrock * be unspared when resilvering completes. If this succeeds, then the
473d7072f8Seschrock * same device was inserted and we should continue normally.
483d7072f8Seschrock *
493d7072f8Seschrock * 5. If the pool does not have the 'autoreplace' property set, attempt to
503d7072f8Seschrock * online the device again without the unspare flag, which will
513d7072f8Seschrock * generate a FMA fault.
523d7072f8Seschrock *
533d7072f8Seschrock * 6. If the pool has the 'autoreplace' property set, and the matching vdev
543d7072f8Seschrock * is a whole disk, then label the new disk and attempt a 'zpool
553d7072f8Seschrock * replace'.
563d7072f8Seschrock *
573d7072f8Seschrock * The module responds to EC_DEV_ADD events for both disks and lofi devices,
583d7072f8Seschrock * with the latter used for testing. The special ESC_ZFS_VDEV_CHECK event
593d7072f8Seschrock * indicates that a device failed to open during pool load, but the autoreplace
603d7072f8Seschrock * property was set. In this case, we deferred the associated FMA fault until
613d7072f8Seschrock * our module had a chance to process the autoreplace logic. If the device
623d7072f8Seschrock * could not be replaced, then the second online attempt will trigger the FMA
633d7072f8Seschrock * fault that we skipped earlier.
643d7072f8Seschrock */
653d7072f8Seschrock
663d7072f8Seschrock #include <alloca.h>
673d7072f8Seschrock #include <devid.h>
683d7072f8Seschrock #include <fcntl.h>
693d7072f8Seschrock #include <libnvpair.h>
703d7072f8Seschrock #include <libsysevent.h>
713d7072f8Seschrock #include <libzfs.h>
723d7072f8Seschrock #include <limits.h>
733d7072f8Seschrock #include <stdlib.h>
743d7072f8Seschrock #include <string.h>
753d7072f8Seschrock #include <syslog.h>
763c112a2bSEric Taylor #include <sys/list.h>
773d7072f8Seschrock #include <sys/sunddi.h>
783d7072f8Seschrock #include <sys/sysevent/eventdefs.h>
793d7072f8Seschrock #include <sys/sysevent/dev.h>
803c112a2bSEric Taylor #include <thread_pool.h>
813d7072f8Seschrock #include <unistd.h>
82b98131cfSEric Taylor #include "syseventd.h"
833d7072f8Seschrock
/*
 * Architecture-specific device-path suffixes.  PHYS_PATH is the minor-node
 * suffix appended to a /devices path to open a known minor node (see
 * devid_iter()); RAW_SLICE is presumably the whole-disk raw slice suffix
 * for this platform -- p0 on x86, s2 on SPARC.
 */
#if defined(__i386) || defined(__amd64)
#define	PHYS_PATH	":q"
#define	RAW_SLICE	"p0"
#elif defined(__sparc)
#define	PHYS_PATH	":c"
#define	RAW_SLICE	"s2"
#else
#error Unknown architecture
#endif
933d7072f8Seschrock
/*
 * Callback invoked for each (pool, vdev) pair that matches one of the
 * search criteria in dev_data_t (see zfs_iter_vdev()).
 */
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;	/* global libzfs handle */
list_t g_pool_list;		/* unavailpool_t entries awaiting recovery */
tpool_t *g_tpool;		/* thread pool running zfs_enable_ds() jobs */
boolean_t g_enumeration_done;	/* initial pool enumeration has completed */
thread_t g_zfs_tid;		/* enumeration thread id -- set elsewhere */

/*
 * Entry on g_pool_list: a pool whose top-level vdev state was below
 * DEGRADED (i.e. unavailable) when it was examined.
 */
typedef struct unavailpool {
	zpool_handle_t *uap_zhp;
	list_node_t uap_node;
} unavailpool_t;
1063c112a2bSEric Taylor
1073c112a2bSEric Taylor int
zfs_toplevel_state(zpool_handle_t * zhp)1083c112a2bSEric Taylor zfs_toplevel_state(zpool_handle_t *zhp)
1093c112a2bSEric Taylor {
1103c112a2bSEric Taylor nvlist_t *nvroot;
1113c112a2bSEric Taylor vdev_stat_t *vs;
1123c112a2bSEric Taylor unsigned int c;
1133c112a2bSEric Taylor
1143c112a2bSEric Taylor verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1153c112a2bSEric Taylor ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1163c112a2bSEric Taylor verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
1173c112a2bSEric Taylor (uint64_t **)&vs, &c) == 0);
1183c112a2bSEric Taylor return (vs->vs_state);
1193c112a2bSEric Taylor }
1203c112a2bSEric Taylor
1213c112a2bSEric Taylor static int
zfs_unavail_pool(zpool_handle_t * zhp,void * data)1223c112a2bSEric Taylor zfs_unavail_pool(zpool_handle_t *zhp, void *data)
1233c112a2bSEric Taylor {
1243c112a2bSEric Taylor if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
1253c112a2bSEric Taylor unavailpool_t *uap;
1263c112a2bSEric Taylor uap = malloc(sizeof (unavailpool_t));
1273c112a2bSEric Taylor uap->uap_zhp = zhp;
1283c112a2bSEric Taylor list_insert_tail((list_t *)data, uap);
1293c112a2bSEric Taylor } else {
1303c112a2bSEric Taylor zpool_close(zhp);
1313c112a2bSEric Taylor }
1323c112a2bSEric Taylor return (0);
1333c112a2bSEric Taylor }
1343d7072f8Seschrock
1353d7072f8Seschrock /*
1363d7072f8Seschrock * The device associated with the given vdev (either by devid or physical path)
1373d7072f8Seschrock * has been added to the system. If 'isdisk' is set, then we only attempt a
1383d7072f8Seschrock * replacement if it's a whole disk. This also implies that we should label the
1393d7072f8Seschrock * disk first.
1403d7072f8Seschrock *
1413d7072f8Seschrock * First, we attempt to online the device (making sure to undo any spare
1423d7072f8Seschrock * operation when finished). If this succeeds, then we're done. If it fails,
1433d7072f8Seschrock * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected.  If the 'autoreplace' property
 * is set, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'.  If the online is successful, but the new state is something else
1473d7072f8Seschrock * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
1483d7072f8Seschrock * race, and we should avoid attempting to relabel the disk.
1493d7072f8Seschrock */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
{
	char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL;
	char *physpath = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	zpool_boot_label_t boot_type;
	uint64_t boot_size;
	size_t len;

	/* Without a /dev path there is nothing we can do with this vdev. */
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* These are optional; their absence leaves the defaults above. */
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);

	/*
	 * We should have a way to online a device by guid.  With the current
	 * interface, we are forced to chop off the 's0' (two characters) for
	 * whole disks.
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));
	if (wholedisk)
		fullpath[strlen(fullpath) - 2] = '\0';

	/*
	 * Attempt to online the device.  It would be nice to online this by
	 * GUID, but the current interface only supports lookup by path.  An
	 * administratively offlined vdev is left alone entirely.
	 */
	if (offline ||
	    (zpool_vdev_online(zhp, fullpath,
	    ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)))
		return;

	/*
	 * If the pool doesn't have the autoreplace property set (or this is a
	 * disk event for a non-whole-disk vdev), then attempt a true online
	 * (without the unspare flag), which will trigger a FMA fault.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    (isdisk && !wholedisk)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		return;
	}

	if (isdisk) {
		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * access to a raw node.  Ideally, we'd like to walk the devinfo
		 * tree and find a raw node from the corresponding parent node.
		 * This is overly complicated, and since we know how we labeled
		 * this device in the first place, we know it's safe to switch
		 * from /dev/dsk to /dev/rdsk and append the backup slice.
		 *
		 * If any part of this process fails, then do a force online to
		 * trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		if (strncmp(path, ZFS_DISK_ROOTD,
		    strlen(ZFS_DISK_ROOTD)) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * Skip the leading 9 characters -- presumably the
		 * "/dev/dsk/" prefix matched above -- and drop the trailing
		 * two-character slice suffix to get the bare disk name.
		 */
		(void) strlcpy(rawpath, path + 9, sizeof (rawpath));
		len = strlen(rawpath);
		rawpath[len - 2] = '\0';

		/* Bootable pools need the boot label copied to the new disk. */
		if (zpool_is_bootable(zhp))
			boot_type = ZPOOL_COPY_BOOT_LABEL;
		else
			boot_type = ZPOOL_NO_BOOT_LABEL;

		boot_size = zpool_get_prop_int(zhp, ZPOOL_PROP_BOOTSIZE, NULL);
		if (zpool_label_disk(g_zfshdl, zhp, rawpath,
		    boot_type, boot_size, NULL) != 0) {
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}
	}

	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
		return;

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &newvd,
	    1) != 0) {
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Attaching with the same old/new path amounts to a replace of the
	 * device in place.
	 */
	(void) zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE);

	nvlist_free(nvroot);

}
2743d7072f8Seschrock
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char *dd_compare;		/* value to match dd_prop against */
	const char *dd_prop;		/* vdev nvlist property to compare */
	zfs_process_func_t dd_func;	/* invoked on each matching vdev */
	boolean_t dd_found;		/* set when a match was processed */
	boolean_t dd_isdisk;		/* passed through to dd_func */
	uint64_t dd_pool_guid;		/* 0 == search all pools */
	uint64_t dd_vdev_guid;		/* nonzero == match by guid only */
} dev_data_t;
2873d7072f8Seschrock
2883d7072f8Seschrock static void
zfs_iter_vdev(zpool_handle_t * zhp,nvlist_t * nvl,void * data)2893d7072f8Seschrock zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
2903d7072f8Seschrock {
2913d7072f8Seschrock dev_data_t *dp = data;
2923d7072f8Seschrock char *path;
2933d7072f8Seschrock uint_t c, children;
2943d7072f8Seschrock nvlist_t **child;
295b01c3b58Seschrock size_t len;
2963d7072f8Seschrock uint64_t guid;
2973d7072f8Seschrock
2983d7072f8Seschrock /*
2993d7072f8Seschrock * First iterate over any children.
3003d7072f8Seschrock */
3013d7072f8Seschrock if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
3023d7072f8Seschrock &child, &children) == 0) {
3033d7072f8Seschrock for (c = 0; c < children; c++)
3043d7072f8Seschrock zfs_iter_vdev(zhp, child[c], data);
3053d7072f8Seschrock return;
3063d7072f8Seschrock }
3073d7072f8Seschrock
3083d7072f8Seschrock if (dp->dd_vdev_guid != 0) {
3093d7072f8Seschrock if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
3103d7072f8Seschrock &guid) != 0 || guid != dp->dd_vdev_guid)
3113d7072f8Seschrock return;
31214372834SHans Rosenfeld } else if (dp->dd_compare != NULL) {
313b01c3b58Seschrock len = strlen(dp->dd_compare);
314b01c3b58Seschrock
3153d7072f8Seschrock if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
3163d7072f8Seschrock strncmp(dp->dd_compare, path, len) != 0)
3173d7072f8Seschrock return;
3183d7072f8Seschrock
3193d7072f8Seschrock /*
3203d7072f8Seschrock * Normally, we want to have an exact match for the comparison
3213d7072f8Seschrock * string. However, we allow substring matches in the following
3223d7072f8Seschrock * cases:
3233d7072f8Seschrock *
3246f0e4dc9SAndy Fiddaman * <path>: This is a devpath, and the target is one
3256f0e4dc9SAndy Fiddaman * of its children.
3263d7072f8Seschrock *
3276f0e4dc9SAndy Fiddaman * <path/> This is a devid for a whole disk, and
3286f0e4dc9SAndy Fiddaman * the target is one of its children.
3293d7072f8Seschrock */
3303d7072f8Seschrock if (path[len] != '\0' && path[len] != ':' &&
3313d7072f8Seschrock path[len - 1] != '/')
3323d7072f8Seschrock return;
3333d7072f8Seschrock }
3343d7072f8Seschrock
3353d7072f8Seschrock (dp->dd_func)(zhp, nvl, dp->dd_isdisk);
3363d7072f8Seschrock }
3373d7072f8Seschrock
3383c112a2bSEric Taylor void
zfs_enable_ds(void * arg)3393c112a2bSEric Taylor zfs_enable_ds(void *arg)
3403c112a2bSEric Taylor {
3413c112a2bSEric Taylor unavailpool_t *pool = (unavailpool_t *)arg;
3423c112a2bSEric Taylor
3433c112a2bSEric Taylor (void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
3443c112a2bSEric Taylor zpool_close(pool->uap_zhp);
3453c112a2bSEric Taylor free(pool);
3463c112a2bSEric Taylor }
3473c112a2bSEric Taylor
3483d7072f8Seschrock static int
zfs_iter_pool(zpool_handle_t * zhp,void * data)3493d7072f8Seschrock zfs_iter_pool(zpool_handle_t *zhp, void *data)
3503d7072f8Seschrock {
3513d7072f8Seschrock nvlist_t *config, *nvl;
3523d7072f8Seschrock dev_data_t *dp = data;
3533d7072f8Seschrock uint64_t pool_guid;
3543c112a2bSEric Taylor unavailpool_t *pool;
3553d7072f8Seschrock
3563d7072f8Seschrock if ((config = zpool_get_config(zhp, NULL)) != NULL) {
3573d7072f8Seschrock if (dp->dd_pool_guid == 0 ||
3583d7072f8Seschrock (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
3593d7072f8Seschrock &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
3603d7072f8Seschrock (void) nvlist_lookup_nvlist(config,
3613d7072f8Seschrock ZPOOL_CONFIG_VDEV_TREE, &nvl);
3623d7072f8Seschrock zfs_iter_vdev(zhp, nvl, data);
3633d7072f8Seschrock }
3643d7072f8Seschrock }
36537e3a0d8SEric Taylor if (g_enumeration_done) {
36637e3a0d8SEric Taylor for (pool = list_head(&g_pool_list); pool != NULL;
36737e3a0d8SEric Taylor pool = list_next(&g_pool_list, pool)) {
36837e3a0d8SEric Taylor
36937e3a0d8SEric Taylor if (strcmp(zpool_get_name(zhp),
37037e3a0d8SEric Taylor zpool_get_name(pool->uap_zhp)))
37137e3a0d8SEric Taylor continue;
37237e3a0d8SEric Taylor if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
37337e3a0d8SEric Taylor list_remove(&g_pool_list, pool);
37437e3a0d8SEric Taylor (void) tpool_dispatch(g_tpool, zfs_enable_ds,
37537e3a0d8SEric Taylor pool);
37637e3a0d8SEric Taylor break;
37737e3a0d8SEric Taylor }
3783c112a2bSEric Taylor }
3793c112a2bSEric Taylor }
3803d7072f8Seschrock
3813d7072f8Seschrock zpool_close(zhp);
3823d7072f8Seschrock return (0);
3833d7072f8Seschrock }
3843d7072f8Seschrock
3853d7072f8Seschrock /*
3863d7072f8Seschrock * Given a physical device path, iterate over all (pool, vdev) pairs which
3873d7072f8Seschrock * correspond to the given path.
3883d7072f8Seschrock */
3893d7072f8Seschrock static boolean_t
devpath_iter(const char * devpath,zfs_process_func_t func,boolean_t wholedisk)3903d7072f8Seschrock devpath_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
3913d7072f8Seschrock {
3923d7072f8Seschrock dev_data_t data = { 0 };
3933d7072f8Seschrock
3943d7072f8Seschrock data.dd_compare = devpath;
3953d7072f8Seschrock data.dd_func = func;
3963d7072f8Seschrock data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
3973d7072f8Seschrock data.dd_found = B_FALSE;
3983d7072f8Seschrock data.dd_isdisk = wholedisk;
3993d7072f8Seschrock
4003d7072f8Seschrock (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
4013d7072f8Seschrock
4023d7072f8Seschrock return (data.dd_found);
4033d7072f8Seschrock }
4043d7072f8Seschrock
4053d7072f8Seschrock /*
4063d7072f8Seschrock * Given a /devices path, lookup the corresponding devid for each minor node,
4073d7072f8Seschrock * and find any vdevs with matching devids. Doing this straight up would be
4083d7072f8Seschrock * rather inefficient, O(minor nodes * vdevs in system), so we take advantage of
4093d7072f8Seschrock * the fact that each devid ends with "/<minornode>". Once we find any valid
4103d7072f8Seschrock * minor node, we chop off the portion after the last slash, and then search for
4113d7072f8Seschrock * matching vdevs, which is O(vdevs in system).
4123d7072f8Seschrock */
4133d7072f8Seschrock static boolean_t
devid_iter(const char * devpath,zfs_process_func_t func,boolean_t wholedisk)4143d7072f8Seschrock devid_iter(const char *devpath, zfs_process_func_t func, boolean_t wholedisk)
4153d7072f8Seschrock {
4163d7072f8Seschrock size_t len = strlen(devpath) + sizeof ("/devices") +
4173d7072f8Seschrock sizeof (PHYS_PATH) - 1;
4183d7072f8Seschrock char *fullpath;
4193d7072f8Seschrock int fd;
4203d7072f8Seschrock ddi_devid_t devid;
4213d7072f8Seschrock char *devidstr, *fulldevid;
4223d7072f8Seschrock dev_data_t data = { 0 };
4233d7072f8Seschrock
4243d7072f8Seschrock /*
4253d7072f8Seschrock * Try to open a known minor node.
4263d7072f8Seschrock */
4273d7072f8Seschrock fullpath = alloca(len);
4283d7072f8Seschrock (void) snprintf(fullpath, len, "/devices%s%s", devpath, PHYS_PATH);
4293d7072f8Seschrock if ((fd = open(fullpath, O_RDONLY)) < 0)
4303d7072f8Seschrock return (B_FALSE);
4313d7072f8Seschrock
4323d7072f8Seschrock /*
4333d7072f8Seschrock * Determine the devid as a string, with no trailing slash for the minor
4343d7072f8Seschrock * node.
4353d7072f8Seschrock */
4363d7072f8Seschrock if (devid_get(fd, &devid) != 0) {
4373d7072f8Seschrock (void) close(fd);
4383d7072f8Seschrock return (B_FALSE);
4393d7072f8Seschrock }
4403d7072f8Seschrock (void) close(fd);
4413d7072f8Seschrock
4423d7072f8Seschrock if ((devidstr = devid_str_encode(devid, NULL)) == NULL) {
4433d7072f8Seschrock devid_free(devid);
4443d7072f8Seschrock return (B_FALSE);
4453d7072f8Seschrock }
4463d7072f8Seschrock
4473d7072f8Seschrock len = strlen(devidstr) + 2;
4483d7072f8Seschrock fulldevid = alloca(len);
4493d7072f8Seschrock (void) snprintf(fulldevid, len, "%s/", devidstr);
4503d7072f8Seschrock
4513d7072f8Seschrock data.dd_compare = fulldevid;
4523d7072f8Seschrock data.dd_func = func;
4533d7072f8Seschrock data.dd_prop = ZPOOL_CONFIG_DEVID;
4543d7072f8Seschrock data.dd_found = B_FALSE;
4553d7072f8Seschrock data.dd_isdisk = wholedisk;
4563d7072f8Seschrock
4573d7072f8Seschrock (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
4583d7072f8Seschrock
4593d7072f8Seschrock devid_str_free(devidstr);
46025085d90SEric Taylor devid_free(devid);
4613d7072f8Seschrock
4623d7072f8Seschrock return (data.dd_found);
4633d7072f8Seschrock }
4643d7072f8Seschrock
4653d7072f8Seschrock /*
4663d7072f8Seschrock * This function is called when we receive a devfs add event. This can be
4673d7072f8Seschrock * either a disk event or a lofi event, and the behavior is slightly different
4683d7072f8Seschrock * depending on which it is.
4693d7072f8Seschrock */
4703d7072f8Seschrock static int
zfs_deliver_add(nvlist_t * nvl,boolean_t is_lofi)4713d7072f8Seschrock zfs_deliver_add(nvlist_t *nvl, boolean_t is_lofi)
4723d7072f8Seschrock {
4733d7072f8Seschrock char *devpath, *devname;
4743d7072f8Seschrock char path[PATH_MAX], realpath[PATH_MAX];
4753d7072f8Seschrock char *colon, *raw;
4763d7072f8Seschrock int ret;
4773d7072f8Seschrock
4783d7072f8Seschrock /*
4793d7072f8Seschrock * The main unit of operation is the physical device path. For disks,
4803d7072f8Seschrock * this is the device node, as all minor nodes are affected. For lofi
4813d7072f8Seschrock * devices, this includes the minor path. Unfortunately, this isn't
4823d7072f8Seschrock * represented in the DEV_PHYS_PATH for various reasons.
4833d7072f8Seschrock */
4843d7072f8Seschrock if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath) != 0)
4853d7072f8Seschrock return (-1);
4863d7072f8Seschrock
4873d7072f8Seschrock /*
4883d7072f8Seschrock * If this is a lofi device, then also get the minor instance name.
4893d7072f8Seschrock * Unfortunately, the current payload doesn't include an easy way to get
4903d7072f8Seschrock * this information. So we cheat by resolving the 'dev_name' (which
4913d7072f8Seschrock * refers to the raw device) and taking the portion between ':(*),raw'.
4923d7072f8Seschrock */
4933d7072f8Seschrock (void) strlcpy(realpath, devpath, sizeof (realpath));
4943d7072f8Seschrock if (is_lofi) {
4953d7072f8Seschrock if (nvlist_lookup_string(nvl, DEV_NAME,
4963d7072f8Seschrock &devname) == 0 &&
4973d7072f8Seschrock (ret = resolvepath(devname, path,
4983d7072f8Seschrock sizeof (path))) > 0) {
4993d7072f8Seschrock path[ret] = '\0';
5003d7072f8Seschrock colon = strchr(path, ':');
5013d7072f8Seschrock if (colon != NULL)
5023d7072f8Seschrock raw = strstr(colon + 1, ",raw");
5033d7072f8Seschrock if (colon != NULL && raw != NULL) {
5043d7072f8Seschrock *raw = '\0';
5053d7072f8Seschrock (void) snprintf(realpath,
5063d7072f8Seschrock sizeof (realpath), "%s%s",
5073d7072f8Seschrock devpath, colon);
5083d7072f8Seschrock *raw = ',';
5093d7072f8Seschrock }
5103d7072f8Seschrock }
5113d7072f8Seschrock }
5123d7072f8Seschrock
5133d7072f8Seschrock /*
5143d7072f8Seschrock * Iterate over all vdevs with a matching devid, and then those with a
5153d7072f8Seschrock * matching /devices path. For disks, we only want to pay attention to
5163d7072f8Seschrock * vdevs marked as whole disks. For lofi, we don't care (because we're
5173d7072f8Seschrock * matching an exact minor name).
5183d7072f8Seschrock */
5193d7072f8Seschrock if (!devid_iter(realpath, zfs_process_add, !is_lofi))
5203d7072f8Seschrock (void) devpath_iter(realpath, zfs_process_add, !is_lofi);
5213d7072f8Seschrock
5223d7072f8Seschrock return (0);
5233d7072f8Seschrock }
5243d7072f8Seschrock
5253d7072f8Seschrock /*
5263d7072f8Seschrock * Called when we receive a VDEV_CHECK event, which indicates a device could not
5273d7072f8Seschrock * be opened during initial pool open, but the autoreplace property was set on
5283d7072f8Seschrock * the pool. In this case, we treat it as if it were an add event.
5293d7072f8Seschrock */
5303d7072f8Seschrock static int
zfs_deliver_check(nvlist_t * nvl)5313d7072f8Seschrock zfs_deliver_check(nvlist_t *nvl)
5323d7072f8Seschrock {
5333d7072f8Seschrock dev_data_t data = { 0 };
5343d7072f8Seschrock
5353d7072f8Seschrock if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
5363d7072f8Seschrock &data.dd_pool_guid) != 0 ||
5373d7072f8Seschrock nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
538efb4a871SYuri Pankov &data.dd_vdev_guid) != 0 ||
539efb4a871SYuri Pankov data.dd_vdev_guid == 0)
5403d7072f8Seschrock return (0);
5413d7072f8Seschrock
5423d7072f8Seschrock data.dd_isdisk = B_TRUE;
5433d7072f8Seschrock data.dd_func = zfs_process_add;
5443d7072f8Seschrock
5453d7072f8Seschrock (void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
5463d7072f8Seschrock
5473d7072f8Seschrock return (0);
5483d7072f8Seschrock }
5493d7072f8Seschrock
550b98131cfSEric Taylor #define DEVICE_PREFIX "/devices"
551b98131cfSEric Taylor
552b98131cfSEric Taylor static int
zfsdle_vdev_online(zpool_handle_t * zhp,void * data)553b98131cfSEric Taylor zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
554b98131cfSEric Taylor {
555b98131cfSEric Taylor char *devname = data;
556b98131cfSEric Taylor boolean_t avail_spare, l2cache;
557b98131cfSEric Taylor vdev_state_t newstate;
558b98131cfSEric Taylor nvlist_t *tgt;
559b98131cfSEric Taylor
5606f0e4dc9SAndy Fiddaman syseventd_print(9, "%s: searching for %s in pool %s\n", __func__,
561b98131cfSEric Taylor devname, zpool_get_name(zhp));
562b98131cfSEric Taylor
563b98131cfSEric Taylor if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
564b98131cfSEric Taylor &avail_spare, &l2cache, NULL)) != NULL) {
565b98131cfSEric Taylor char *path, fullpath[MAXPATHLEN];
566b98131cfSEric Taylor uint64_t wholedisk = 0ULL;
567b98131cfSEric Taylor
568*b4fb0039SJoshua M. Clulow /*
569*b4fb0039SJoshua M. Clulow * If the /dev path of the device is invalid because the disk
570*b4fb0039SJoshua M. Clulow * has been moved to a new location, we need to try to refresh
571*b4fb0039SJoshua M. Clulow * that path before onlining the device.
572*b4fb0039SJoshua M. Clulow */
573*b4fb0039SJoshua M. Clulow zpool_vdev_refresh_path(g_zfshdl, zhp, tgt);
574*b4fb0039SJoshua M. Clulow
575b98131cfSEric Taylor verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
576b98131cfSEric Taylor &path) == 0);
577b98131cfSEric Taylor verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
578b98131cfSEric Taylor &wholedisk) == 0);
579b98131cfSEric Taylor
5806f0e4dc9SAndy Fiddaman syseventd_print(9, "%s: "
5816f0e4dc9SAndy Fiddaman "found %s in pool %s (wholedisk: %s)\n", __func__,
5826f0e4dc9SAndy Fiddaman path, zpool_get_name(zhp),
5836f0e4dc9SAndy Fiddaman wholedisk != 0 ? "true" : "false");
5846f0e4dc9SAndy Fiddaman
585b98131cfSEric Taylor (void) strlcpy(fullpath, path, sizeof (fullpath));
5864263d13fSGeorge Wilson if (wholedisk) {
587b98131cfSEric Taylor fullpath[strlen(fullpath) - 2] = '\0';
588b98131cfSEric Taylor
5894263d13fSGeorge Wilson /*
5904263d13fSGeorge Wilson * We need to reopen the pool associated with this
5914263d13fSGeorge Wilson * device so that the kernel can update the size
5924263d13fSGeorge Wilson * of the expanded device.
5934263d13fSGeorge Wilson */
5944263d13fSGeorge Wilson (void) zpool_reopen(zhp);
5954263d13fSGeorge Wilson }
5964263d13fSGeorge Wilson
597b98131cfSEric Taylor if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
5986f0e4dc9SAndy Fiddaman syseventd_print(9, "%s: "
5996f0e4dc9SAndy Fiddaman "setting device %s to ONLINE state in pool %s.\n",
6006f0e4dc9SAndy Fiddaman __func__, fullpath, zpool_get_name(zhp));
6016f0e4dc9SAndy Fiddaman if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
602b98131cfSEric Taylor (void) zpool_vdev_online(zhp, fullpath, 0,
603b98131cfSEric Taylor &newstate);
6046f0e4dc9SAndy Fiddaman }
605b98131cfSEric Taylor }
60625085d90SEric Taylor zpool_close(zhp);
607b98131cfSEric Taylor return (1);
608b98131cfSEric Taylor }
60925085d90SEric Taylor zpool_close(zhp);
610b98131cfSEric Taylor return (0);
611b98131cfSEric Taylor }
612b98131cfSEric Taylor
61314372834SHans Rosenfeld /*
61414372834SHans Rosenfeld * This function is called for each vdev of a pool for which any of the
 * following events was received:
61614372834SHans Rosenfeld * - ESC_ZFS_vdev_add
61714372834SHans Rosenfeld * - ESC_ZFS_vdev_attach
61814372834SHans Rosenfeld * - ESC_ZFS_vdev_clear
61914372834SHans Rosenfeld * - ESC_ZFS_vdev_online
62014372834SHans Rosenfeld * - ESC_ZFS_pool_create
62114372834SHans Rosenfeld * - ESC_ZFS_pool_import
62214372834SHans Rosenfeld * It will update the vdevs FRU property if it is out of date.
62314372834SHans Rosenfeld */
62414372834SHans Rosenfeld /*ARGSUSED2*/
62514372834SHans Rosenfeld static void
zfs_update_vdev_fru(zpool_handle_t * zhp,nvlist_t * vdev,boolean_t isdisk)62614372834SHans Rosenfeld zfs_update_vdev_fru(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t isdisk)
62714372834SHans Rosenfeld {
62814372834SHans Rosenfeld char *devpath, *cptr, *oldfru = NULL;
62914372834SHans Rosenfeld const char *newfru;
63014372834SHans Rosenfeld uint64_t vdev_guid;
63114372834SHans Rosenfeld
63214372834SHans Rosenfeld (void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &vdev_guid);
63314372834SHans Rosenfeld (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &devpath);
63414372834SHans Rosenfeld (void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_FRU, &oldfru);
63514372834SHans Rosenfeld
63614372834SHans Rosenfeld /* remove :<slice> from devpath */
63714372834SHans Rosenfeld cptr = strrchr(devpath, ':');
63814372834SHans Rosenfeld if (cptr != NULL)
63914372834SHans Rosenfeld *cptr = '\0';
64014372834SHans Rosenfeld
64114372834SHans Rosenfeld newfru = libzfs_fru_lookup(g_zfshdl, devpath);
64214372834SHans Rosenfeld if (newfru == NULL) {
64314372834SHans Rosenfeld syseventd_print(9, "zfs_update_vdev_fru: no FRU for %s\n",
64414372834SHans Rosenfeld devpath);
64514372834SHans Rosenfeld return;
64614372834SHans Rosenfeld }
64714372834SHans Rosenfeld
64814372834SHans Rosenfeld /* do nothing if the FRU hasn't changed */
64914372834SHans Rosenfeld if (oldfru != NULL && libzfs_fru_compare(g_zfshdl, oldfru, newfru)) {
65014372834SHans Rosenfeld syseventd_print(9, "zfs_update_vdev_fru: FRU unchanged\n");
65114372834SHans Rosenfeld return;
65214372834SHans Rosenfeld }
65314372834SHans Rosenfeld
65414372834SHans Rosenfeld syseventd_print(9, "zfs_update_vdev_fru: devpath = %s\n", devpath);
65514372834SHans Rosenfeld syseventd_print(9, "zfs_update_vdev_fru: FRU = %s\n", newfru);
65614372834SHans Rosenfeld
65714372834SHans Rosenfeld (void) zpool_fru_set(zhp, vdev_guid, newfru);
65814372834SHans Rosenfeld }
65914372834SHans Rosenfeld
66014372834SHans Rosenfeld /*
66114372834SHans Rosenfeld * This function handles the following events:
66214372834SHans Rosenfeld * - ESC_ZFS_vdev_add
66314372834SHans Rosenfeld * - ESC_ZFS_vdev_attach
66414372834SHans Rosenfeld * - ESC_ZFS_vdev_clear
66514372834SHans Rosenfeld * - ESC_ZFS_vdev_online
66614372834SHans Rosenfeld * - ESC_ZFS_pool_create
66714372834SHans Rosenfeld * - ESC_ZFS_pool_import
66814372834SHans Rosenfeld * It will iterate over the pool vdevs to update the FRU property.
66914372834SHans Rosenfeld */
67014372834SHans Rosenfeld int
zfs_deliver_update(nvlist_t * nvl)67114372834SHans Rosenfeld zfs_deliver_update(nvlist_t *nvl)
67214372834SHans Rosenfeld {
67314372834SHans Rosenfeld dev_data_t dd = { 0 };
67414372834SHans Rosenfeld char *pname;
67514372834SHans Rosenfeld zpool_handle_t *zhp;
67614372834SHans Rosenfeld nvlist_t *config, *vdev;
67714372834SHans Rosenfeld
67814372834SHans Rosenfeld if (nvlist_lookup_string(nvl, "pool_name", &pname) != 0) {
67914372834SHans Rosenfeld syseventd_print(9, "zfs_deliver_update: no pool name\n");
68014372834SHans Rosenfeld return (-1);
68114372834SHans Rosenfeld }
68214372834SHans Rosenfeld
68314372834SHans Rosenfeld /*
68414372834SHans Rosenfeld * If this event was triggered by a pool export or destroy we cannot
68514372834SHans Rosenfeld * open the pool. This is not an error, just return 0 as we don't care
68614372834SHans Rosenfeld * about these events.
68714372834SHans Rosenfeld */
68814372834SHans Rosenfeld zhp = zpool_open_canfail(g_zfshdl, pname);
68914372834SHans Rosenfeld if (zhp == NULL)
69014372834SHans Rosenfeld return (0);
69114372834SHans Rosenfeld
69214372834SHans Rosenfeld config = zpool_get_config(zhp, NULL);
69314372834SHans Rosenfeld if (config == NULL) {
69414372834SHans Rosenfeld syseventd_print(9, "zfs_deliver_update: "
69514372834SHans Rosenfeld "failed to get pool config for %s\n", pname);
69614372834SHans Rosenfeld zpool_close(zhp);
69714372834SHans Rosenfeld return (-1);
69814372834SHans Rosenfeld }
69914372834SHans Rosenfeld
70014372834SHans Rosenfeld if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev) != 0) {
70114372834SHans Rosenfeld syseventd_print(0, "zfs_deliver_update: "
70214372834SHans Rosenfeld "failed to get vdev tree for %s\n", pname);
70314372834SHans Rosenfeld zpool_close(zhp);
70414372834SHans Rosenfeld return (-1);
70514372834SHans Rosenfeld }
70614372834SHans Rosenfeld
70714372834SHans Rosenfeld libzfs_fru_refresh(g_zfshdl);
70814372834SHans Rosenfeld
70914372834SHans Rosenfeld dd.dd_func = zfs_update_vdev_fru;
71014372834SHans Rosenfeld zfs_iter_vdev(zhp, vdev, &dd);
71114372834SHans Rosenfeld
71214372834SHans Rosenfeld zpool_close(zhp);
71314372834SHans Rosenfeld return (0);
71414372834SHans Rosenfeld }
71514372834SHans Rosenfeld
716b98131cfSEric Taylor int
zfs_deliver_dle(nvlist_t * nvl)717b98131cfSEric Taylor zfs_deliver_dle(nvlist_t *nvl)
718b98131cfSEric Taylor {
719b98131cfSEric Taylor char *devname;
720b98131cfSEric Taylor if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) != 0) {
721b98131cfSEric Taylor syseventd_print(9, "zfs_deliver_event: no physpath\n");
722b98131cfSEric Taylor return (-1);
723b98131cfSEric Taylor }
724b98131cfSEric Taylor if (strncmp(devname, DEVICE_PREFIX, strlen(DEVICE_PREFIX)) != 0) {
725b98131cfSEric Taylor syseventd_print(9, "zfs_deliver_event: invalid "
726b98131cfSEric Taylor "device '%s'", devname);
727b98131cfSEric Taylor return (-1);
728b98131cfSEric Taylor }
729b98131cfSEric Taylor
730b98131cfSEric Taylor /*
731b98131cfSEric Taylor * We try to find the device using the physical
732b98131cfSEric Taylor * path that has been supplied. We need to strip off
733b98131cfSEric Taylor * the /devices prefix before starting our search.
734b98131cfSEric Taylor */
735b98131cfSEric Taylor devname += strlen(DEVICE_PREFIX);
736b98131cfSEric Taylor if (zpool_iter(g_zfshdl, zfsdle_vdev_online, devname) != 1) {
737b98131cfSEric Taylor syseventd_print(9, "zfs_deliver_event: device '%s' not"
738b98131cfSEric Taylor " found\n", devname);
739b98131cfSEric Taylor return (1);
740b98131cfSEric Taylor }
741b98131cfSEric Taylor return (0);
742b98131cfSEric Taylor }
743b98131cfSEric Taylor
744b98131cfSEric Taylor
7453d7072f8Seschrock /*ARGSUSED*/
7463d7072f8Seschrock static int
zfs_deliver_event(sysevent_t * ev,int unused)7473d7072f8Seschrock zfs_deliver_event(sysevent_t *ev, int unused)
7483d7072f8Seschrock {
7493d7072f8Seschrock const char *class = sysevent_get_class_name(ev);
7503d7072f8Seschrock const char *subclass = sysevent_get_subclass_name(ev);
7513d7072f8Seschrock nvlist_t *nvl;
7523d7072f8Seschrock int ret;
75314372834SHans Rosenfeld boolean_t is_lofi = B_FALSE, is_check = B_FALSE;
75414372834SHans Rosenfeld boolean_t is_dle = B_FALSE, is_update = B_FALSE;
7553d7072f8Seschrock
7563d7072f8Seschrock if (strcmp(class, EC_DEV_ADD) == 0) {
7573d7072f8Seschrock /*
7583d7072f8Seschrock * We're mainly interested in disk additions, but we also listen
7593d7072f8Seschrock * for new lofi devices, to allow for simplified testing.
7603d7072f8Seschrock */
7613d7072f8Seschrock if (strcmp(subclass, ESC_DISK) == 0)
7623d7072f8Seschrock is_lofi = B_FALSE;
7633d7072f8Seschrock else if (strcmp(subclass, ESC_LOFI) == 0)
7643d7072f8Seschrock is_lofi = B_TRUE;
7653d7072f8Seschrock else
7663d7072f8Seschrock return (0);
7673d7072f8Seschrock
7683d7072f8Seschrock is_check = B_FALSE;
76914372834SHans Rosenfeld } else if (strcmp(class, EC_ZFS) == 0) {
77014372834SHans Rosenfeld if (strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
77114372834SHans Rosenfeld /*
77214372834SHans Rosenfeld * This event signifies that a device failed to open
77314372834SHans Rosenfeld * during pool load, but the 'autoreplace' property was
77414372834SHans Rosenfeld * set, so we should pretend it's just been added.
77514372834SHans Rosenfeld */
77614372834SHans Rosenfeld is_check = B_TRUE;
77714372834SHans Rosenfeld } else if ((strcmp(subclass, ESC_ZFS_VDEV_ADD) == 0) ||
77814372834SHans Rosenfeld (strcmp(subclass, ESC_ZFS_VDEV_ATTACH) == 0) ||
77914372834SHans Rosenfeld (strcmp(subclass, ESC_ZFS_VDEV_CLEAR) == 0) ||
78014372834SHans Rosenfeld (strcmp(subclass, ESC_ZFS_VDEV_ONLINE) == 0) ||
78114372834SHans Rosenfeld (strcmp(subclass, ESC_ZFS_POOL_CREATE) == 0) ||
78214372834SHans Rosenfeld (strcmp(subclass, ESC_ZFS_POOL_IMPORT) == 0)) {
78314372834SHans Rosenfeld /*
78414372834SHans Rosenfeld * When we receive these events we check the pool
78514372834SHans Rosenfeld * configuration and update the vdev FRUs if necessary.
78614372834SHans Rosenfeld */
78714372834SHans Rosenfeld is_update = B_TRUE;
78814372834SHans Rosenfeld }
789b98131cfSEric Taylor } else if (strcmp(class, EC_DEV_STATUS) == 0 &&
790b98131cfSEric Taylor strcmp(subclass, ESC_DEV_DLE) == 0) {
791b98131cfSEric Taylor is_dle = B_TRUE;
7923d7072f8Seschrock } else {
7933d7072f8Seschrock return (0);
7943d7072f8Seschrock }
7953d7072f8Seschrock
7963d7072f8Seschrock if (sysevent_get_attr_list(ev, &nvl) != 0)
7973d7072f8Seschrock return (-1);
7983d7072f8Seschrock
799b98131cfSEric Taylor if (is_dle)
800b98131cfSEric Taylor ret = zfs_deliver_dle(nvl);
80114372834SHans Rosenfeld else if (is_update)
80214372834SHans Rosenfeld ret = zfs_deliver_update(nvl);
803b98131cfSEric Taylor else if (is_check)
8043d7072f8Seschrock ret = zfs_deliver_check(nvl);
8053d7072f8Seschrock else
8063d7072f8Seschrock ret = zfs_deliver_add(nvl, is_lofi);
8073d7072f8Seschrock
8083d7072f8Seschrock nvlist_free(nvl);
8093d7072f8Seschrock return (ret);
8103d7072f8Seschrock }
8113d7072f8Seschrock
81237e3a0d8SEric Taylor /*ARGSUSED*/
81337e3a0d8SEric Taylor void *
zfs_enum_pools(void * arg)81437e3a0d8SEric Taylor zfs_enum_pools(void *arg)
81537e3a0d8SEric Taylor {
81637e3a0d8SEric Taylor (void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
81737e3a0d8SEric Taylor if (!list_is_empty(&g_pool_list))
81837e3a0d8SEric Taylor g_tpool = tpool_create(1, sysconf(_SC_NPROCESSORS_ONLN),
81937e3a0d8SEric Taylor 0, NULL);
82037e3a0d8SEric Taylor g_enumeration_done = B_TRUE;
82137e3a0d8SEric Taylor return (NULL);
82237e3a0d8SEric Taylor }
82337e3a0d8SEric Taylor
/*
 * syseventd module ops vector: version, retry count (10), and the event
 * delivery callback handed to syseventd via slm_init().
 */
static struct slm_mod_ops zfs_mod_ops = {
	SE_MAJOR_VERSION, SE_MINOR_VERSION, 10, zfs_deliver_event
};
8273d7072f8Seschrock
8283d7072f8Seschrock struct slm_mod_ops *
slm_init()8293d7072f8Seschrock slm_init()
8303d7072f8Seschrock {
8313d7072f8Seschrock if ((g_zfshdl = libzfs_init()) == NULL)
8323d7072f8Seschrock return (NULL);
83337e3a0d8SEric Taylor /*
83437e3a0d8SEric Taylor * collect a list of unavailable pools (asynchronously,
83537e3a0d8SEric Taylor * since this can take a while)
83637e3a0d8SEric Taylor */
8373c112a2bSEric Taylor list_create(&g_pool_list, sizeof (struct unavailpool),
8383c112a2bSEric Taylor offsetof(struct unavailpool, uap_node));
83937e3a0d8SEric Taylor if (thr_create(NULL, 0, zfs_enum_pools, NULL, 0, &g_zfs_tid) != 0)
84037e3a0d8SEric Taylor return (NULL);
8413d7072f8Seschrock return (&zfs_mod_ops);
8423d7072f8Seschrock }
8433d7072f8Seschrock
8443d7072f8Seschrock void
slm_fini()8453d7072f8Seschrock slm_fini()
8463d7072f8Seschrock {
8473c112a2bSEric Taylor unavailpool_t *pool;
8483c112a2bSEric Taylor
849cead1df3SHans Rosenfeld (void) thr_join(g_zfs_tid, NULL, NULL);
85037e3a0d8SEric Taylor if (g_tpool != NULL) {
8513c112a2bSEric Taylor tpool_wait(g_tpool);
8523c112a2bSEric Taylor tpool_destroy(g_tpool);
8533c112a2bSEric Taylor }
8543c112a2bSEric Taylor while ((pool = (list_head(&g_pool_list))) != NULL) {
8553c112a2bSEric Taylor list_remove(&g_pool_list, pool);
8563c112a2bSEric Taylor zpool_close(pool->uap_zhp);
8573c112a2bSEric Taylor free(pool);
8583c112a2bSEric Taylor }
8593c112a2bSEric Taylor list_destroy(&g_pool_list);
86025085d90SEric Taylor libzfs_fini(g_zfshdl);
8613d7072f8Seschrock }
862