1fa9e4066Sahrens /* 2fa9e4066Sahrens * CDDL HEADER START 3fa9e4066Sahrens * 4fa9e4066Sahrens * The contents of this file are subject to the terms of the 5ea8dc4b6Seschrock * Common Development and Distribution License (the "License"). 6ea8dc4b6Seschrock * You may not use this file except in compliance with the License. 7fa9e4066Sahrens * 8fa9e4066Sahrens * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9fa9e4066Sahrens * or http://www.opensolaris.org/os/licensing. 10fa9e4066Sahrens * See the License for the specific language governing permissions 11fa9e4066Sahrens * and limitations under the License. 12fa9e4066Sahrens * 13fa9e4066Sahrens * When distributing Covered Code, include this CDDL HEADER in each 14fa9e4066Sahrens * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15fa9e4066Sahrens * If applicable, add the following below this CDDL HEADER, with the 16fa9e4066Sahrens * fields enclosed by brackets "[]" replaced with your own identifying 17fa9e4066Sahrens * information: Portions Copyright [yyyy] [name of copyright owner] 18fa9e4066Sahrens * 19fa9e4066Sahrens * CDDL HEADER END 20fa9e4066Sahrens */ 21fa9e4066Sahrens /* 2239c23413Seschrock * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23fa9e4066Sahrens * Use is subject to license terms. 24fa9e4066Sahrens */ 25fa9e4066Sahrens 26fa9e4066Sahrens #pragma ident "%Z%%M% %I% %E% SMI" 27fa9e4066Sahrens 28fa9e4066Sahrens /* 29fa9e4066Sahrens * Pool import support functions. 30fa9e4066Sahrens * 31fa9e4066Sahrens * To import a pool, we rely on reading the configuration information from the 32fa9e4066Sahrens * ZFS label of each device. If we successfully read the label, then we 33fa9e4066Sahrens * organize the configuration information in the following hierarchy: 34fa9e4066Sahrens * 35fa9e4066Sahrens * pool guid -> toplevel vdev guid -> label txg 36fa9e4066Sahrens * 37fa9e4066Sahrens * Duplicate entries matching this same tuple will be discarded. 
Once we have 38fa9e4066Sahrens * examined every device, we pick the best label txg config for each toplevel 39fa9e4066Sahrens * vdev. We then arrange these toplevel vdevs into a complete pool config, and 40fa9e4066Sahrens * update any paths that have changed. Finally, we attempt to import the pool 41fa9e4066Sahrens * using our derived config, and record the results. 42fa9e4066Sahrens */ 43fa9e4066Sahrens 44fa9e4066Sahrens #include <devid.h> 45fa9e4066Sahrens #include <dirent.h> 46fa9e4066Sahrens #include <errno.h> 47fa9e4066Sahrens #include <libintl.h> 48fa9e4066Sahrens #include <stdlib.h> 49fa9e4066Sahrens #include <string.h> 50fa9e4066Sahrens #include <sys/stat.h> 51fa9e4066Sahrens #include <unistd.h> 52fa9e4066Sahrens #include <fcntl.h> 53fa9e4066Sahrens 54fa9e4066Sahrens #include <sys/vdev_impl.h> 55fa9e4066Sahrens 56fa9e4066Sahrens #include "libzfs.h" 57fa9e4066Sahrens #include "libzfs_impl.h" 58fa9e4066Sahrens 59fa9e4066Sahrens /* 60fa9e4066Sahrens * Intermediate structures used to gather configuration information. 
61fa9e4066Sahrens */ 62fa9e4066Sahrens typedef struct config_entry { 63fa9e4066Sahrens uint64_t ce_txg; 64fa9e4066Sahrens nvlist_t *ce_config; 65fa9e4066Sahrens struct config_entry *ce_next; 66fa9e4066Sahrens } config_entry_t; 67fa9e4066Sahrens 68fa9e4066Sahrens typedef struct vdev_entry { 69fa9e4066Sahrens uint64_t ve_guid; 70fa9e4066Sahrens config_entry_t *ve_configs; 71fa9e4066Sahrens struct vdev_entry *ve_next; 72fa9e4066Sahrens } vdev_entry_t; 73fa9e4066Sahrens 74fa9e4066Sahrens typedef struct pool_entry { 75fa9e4066Sahrens uint64_t pe_guid; 76fa9e4066Sahrens vdev_entry_t *pe_vdevs; 77fa9e4066Sahrens struct pool_entry *pe_next; 78fa9e4066Sahrens } pool_entry_t; 79fa9e4066Sahrens 80fa9e4066Sahrens typedef struct name_entry { 8199653d4eSeschrock char *ne_name; 82fa9e4066Sahrens uint64_t ne_guid; 83fa9e4066Sahrens struct name_entry *ne_next; 84fa9e4066Sahrens } name_entry_t; 85fa9e4066Sahrens 86fa9e4066Sahrens typedef struct pool_list { 87fa9e4066Sahrens pool_entry_t *pools; 88fa9e4066Sahrens name_entry_t *names; 89fa9e4066Sahrens } pool_list_t; 90fa9e4066Sahrens 91fa9e4066Sahrens static char * 92fa9e4066Sahrens get_devid(const char *path) 93fa9e4066Sahrens { 94fa9e4066Sahrens int fd; 95fa9e4066Sahrens ddi_devid_t devid; 96fa9e4066Sahrens char *minor, *ret; 97fa9e4066Sahrens 98fa9e4066Sahrens if ((fd = open(path, O_RDONLY)) < 0) 99fa9e4066Sahrens return (NULL); 100fa9e4066Sahrens 101fa9e4066Sahrens minor = NULL; 102fa9e4066Sahrens ret = NULL; 103fa9e4066Sahrens if (devid_get(fd, &devid) == 0) { 104fa9e4066Sahrens if (devid_get_minor_name(fd, &minor) == 0) 105fa9e4066Sahrens ret = devid_str_encode(devid, minor); 106fa9e4066Sahrens if (minor != NULL) 107fa9e4066Sahrens devid_str_free(minor); 108fa9e4066Sahrens devid_free(devid); 109fa9e4066Sahrens } 110c67d9675Seschrock (void) close(fd); 111fa9e4066Sahrens 112fa9e4066Sahrens return (ret); 113fa9e4066Sahrens } 114fa9e4066Sahrens 115fa9e4066Sahrens 116fa9e4066Sahrens /* 117fa9e4066Sahrens * Go through and fix up any 
path and/or devid information for the given vdev 118fa9e4066Sahrens * configuration. 119fa9e4066Sahrens */ 12099653d4eSeschrock static int 121fa9e4066Sahrens fix_paths(nvlist_t *nv, name_entry_t *names) 122fa9e4066Sahrens { 123fa9e4066Sahrens nvlist_t **child; 124fa9e4066Sahrens uint_t c, children; 125fa9e4066Sahrens uint64_t guid; 126c67d9675Seschrock name_entry_t *ne, *best; 127c67d9675Seschrock char *path, *devid; 128c67d9675Seschrock int matched; 129fa9e4066Sahrens 130fa9e4066Sahrens if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 131fa9e4066Sahrens &child, &children) == 0) { 132fa9e4066Sahrens for (c = 0; c < children; c++) 13399653d4eSeschrock if (fix_paths(child[c], names) != 0) 13499653d4eSeschrock return (-1); 13599653d4eSeschrock return (0); 136fa9e4066Sahrens } 137fa9e4066Sahrens 138fa9e4066Sahrens /* 139fa9e4066Sahrens * This is a leaf (file or disk) vdev. In either case, go through 140fa9e4066Sahrens * the name list and see if we find a matching guid. If so, replace 141fa9e4066Sahrens * the path and see if we can calculate a new devid. 142c67d9675Seschrock * 143c67d9675Seschrock * There may be multiple names associated with a particular guid, in 144c67d9675Seschrock * which case we have overlapping slices or multiple paths to the same 145c67d9675Seschrock * disk. If this is the case, then we want to pick the path that is 146c67d9675Seschrock * the most similar to the original, where "most similar" is the number 147c67d9675Seschrock * of matching characters starting from the end of the path. This will 148c67d9675Seschrock * preserve slice numbers even if the disks have been reorganized, and 149c67d9675Seschrock * will also catch preferred disk names if multiple paths exist. 
150fa9e4066Sahrens */ 151fa9e4066Sahrens verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0); 152c67d9675Seschrock if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0) 153c67d9675Seschrock path = NULL; 154c67d9675Seschrock 155c67d9675Seschrock matched = 0; 156c67d9675Seschrock best = NULL; 157c67d9675Seschrock for (ne = names; ne != NULL; ne = ne->ne_next) { 158c67d9675Seschrock if (ne->ne_guid == guid) { 159c67d9675Seschrock const char *src, *dst; 160c67d9675Seschrock int count; 161c67d9675Seschrock 162c67d9675Seschrock if (path == NULL) { 163c67d9675Seschrock best = ne; 164c67d9675Seschrock break; 165c67d9675Seschrock } 166c67d9675Seschrock 167c67d9675Seschrock src = ne->ne_name + strlen(ne->ne_name) - 1; 168c67d9675Seschrock dst = path + strlen(path) - 1; 169c67d9675Seschrock for (count = 0; src >= ne->ne_name && dst >= path; 170c67d9675Seschrock src--, dst--, count++) 171c67d9675Seschrock if (*src != *dst) 172c67d9675Seschrock break; 173c67d9675Seschrock 174c67d9675Seschrock /* 175c67d9675Seschrock * At this point, 'count' is the number of characters 176c67d9675Seschrock * matched from the end. 
177c67d9675Seschrock */ 178c67d9675Seschrock if (count > matched || best == NULL) { 179c67d9675Seschrock best = ne; 180c67d9675Seschrock matched = count; 181c67d9675Seschrock } 182c67d9675Seschrock } 183c67d9675Seschrock } 184fa9e4066Sahrens 185c67d9675Seschrock if (best == NULL) 18699653d4eSeschrock return (0); 187fa9e4066Sahrens 18899653d4eSeschrock if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0) 18999653d4eSeschrock return (-1); 190fa9e4066Sahrens 191c67d9675Seschrock if ((devid = get_devid(best->ne_name)) == NULL) { 192fa9e4066Sahrens (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID); 193fa9e4066Sahrens } else { 19499653d4eSeschrock if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) 19599653d4eSeschrock return (-1); 196fa9e4066Sahrens devid_str_free(devid); 197fa9e4066Sahrens } 19899653d4eSeschrock 19999653d4eSeschrock return (0); 200fa9e4066Sahrens } 201fa9e4066Sahrens 202fa9e4066Sahrens /* 203fa9e4066Sahrens * Add the given configuration to the list of known devices. 204fa9e4066Sahrens */ 20599653d4eSeschrock static int 20699653d4eSeschrock add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path, 20799653d4eSeschrock nvlist_t *config) 208fa9e4066Sahrens { 20999653d4eSeschrock uint64_t pool_guid, vdev_guid, top_guid, txg, state; 210fa9e4066Sahrens pool_entry_t *pe; 211fa9e4066Sahrens vdev_entry_t *ve; 212fa9e4066Sahrens config_entry_t *ce; 213fa9e4066Sahrens name_entry_t *ne; 214fa9e4066Sahrens 21599653d4eSeschrock /* 21699653d4eSeschrock * If this is a hot spare not currently in use, add it to the list of 21799653d4eSeschrock * names to translate, but don't do anything else. 
21899653d4eSeschrock */ 21999653d4eSeschrock if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 22099653d4eSeschrock &state) == 0 && state == POOL_STATE_SPARE && 22199653d4eSeschrock nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) { 22299653d4eSeschrock if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) 223ccae0b50Seschrock return (-1); 22499653d4eSeschrock 22599653d4eSeschrock if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { 22699653d4eSeschrock free(ne); 22799653d4eSeschrock return (-1); 22899653d4eSeschrock } 22999653d4eSeschrock ne->ne_guid = vdev_guid; 23099653d4eSeschrock ne->ne_next = pl->names; 23199653d4eSeschrock pl->names = ne; 23299653d4eSeschrock return (0); 23399653d4eSeschrock } 23499653d4eSeschrock 235fa9e4066Sahrens /* 236fa9e4066Sahrens * If we have a valid config but cannot read any of these fields, then 237fa9e4066Sahrens * it means we have a half-initialized label. In vdev_label_init() 238fa9e4066Sahrens * we write a label with txg == 0 so that we can identify the device 239fa9e4066Sahrens * in case the user refers to the same disk later on. If we fail to 240fa9e4066Sahrens * create the pool, we'll be left with a label in this state 241fa9e4066Sahrens * which should not be considered part of a valid pool. 242fa9e4066Sahrens */ 243fa9e4066Sahrens if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 244fa9e4066Sahrens &pool_guid) != 0 || 245fa9e4066Sahrens nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, 246fa9e4066Sahrens &vdev_guid) != 0 || 247fa9e4066Sahrens nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID, 248fa9e4066Sahrens &top_guid) != 0 || 249fa9e4066Sahrens nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 250fa9e4066Sahrens &txg) != 0 || txg == 0) { 251fa9e4066Sahrens nvlist_free(config); 25299653d4eSeschrock return (0); 253fa9e4066Sahrens } 254fa9e4066Sahrens 255fa9e4066Sahrens /* 256fa9e4066Sahrens * First, see if we know about this pool. 
If not, then add it to the 257fa9e4066Sahrens * list of known pools. 258fa9e4066Sahrens */ 259fa9e4066Sahrens for (pe = pl->pools; pe != NULL; pe = pe->pe_next) { 260fa9e4066Sahrens if (pe->pe_guid == pool_guid) 261fa9e4066Sahrens break; 262fa9e4066Sahrens } 263fa9e4066Sahrens 264fa9e4066Sahrens if (pe == NULL) { 26599653d4eSeschrock if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) { 26699653d4eSeschrock nvlist_free(config); 26799653d4eSeschrock return (-1); 26899653d4eSeschrock } 269fa9e4066Sahrens pe->pe_guid = pool_guid; 270fa9e4066Sahrens pe->pe_next = pl->pools; 271fa9e4066Sahrens pl->pools = pe; 272fa9e4066Sahrens } 273fa9e4066Sahrens 274fa9e4066Sahrens /* 275fa9e4066Sahrens * Second, see if we know about this toplevel vdev. Add it if its 276fa9e4066Sahrens * missing. 277fa9e4066Sahrens */ 278fa9e4066Sahrens for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) { 279fa9e4066Sahrens if (ve->ve_guid == top_guid) 280fa9e4066Sahrens break; 281fa9e4066Sahrens } 282fa9e4066Sahrens 283fa9e4066Sahrens if (ve == NULL) { 28499653d4eSeschrock if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) { 28599653d4eSeschrock nvlist_free(config); 28699653d4eSeschrock return (-1); 28799653d4eSeschrock } 288fa9e4066Sahrens ve->ve_guid = top_guid; 289fa9e4066Sahrens ve->ve_next = pe->pe_vdevs; 290fa9e4066Sahrens pe->pe_vdevs = ve; 291fa9e4066Sahrens } 292fa9e4066Sahrens 293fa9e4066Sahrens /* 294fa9e4066Sahrens * Third, see if we have a config with a matching transaction group. If 295fa9e4066Sahrens * so, then we do nothing. Otherwise, add it to the list of known 296fa9e4066Sahrens * configs. 
297fa9e4066Sahrens */ 298fa9e4066Sahrens for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) { 299fa9e4066Sahrens if (ce->ce_txg == txg) 300fa9e4066Sahrens break; 301fa9e4066Sahrens } 302fa9e4066Sahrens 303fa9e4066Sahrens if (ce == NULL) { 30499653d4eSeschrock if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) { 30599653d4eSeschrock nvlist_free(config); 30699653d4eSeschrock return (-1); 30799653d4eSeschrock } 308fa9e4066Sahrens ce->ce_txg = txg; 309fa9e4066Sahrens ce->ce_config = config; 310fa9e4066Sahrens ce->ce_next = ve->ve_configs; 311fa9e4066Sahrens ve->ve_configs = ce; 312fa9e4066Sahrens } else { 313fa9e4066Sahrens nvlist_free(config); 314fa9e4066Sahrens } 315fa9e4066Sahrens 316fa9e4066Sahrens /* 317fa9e4066Sahrens * At this point we've successfully added our config to the list of 318fa9e4066Sahrens * known configs. The last thing to do is add the vdev guid -> path 319fa9e4066Sahrens * mappings so that we can fix up the configuration as necessary before 320fa9e4066Sahrens * doing the import. 321fa9e4066Sahrens */ 32299653d4eSeschrock if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL) 32399653d4eSeschrock return (-1); 32499653d4eSeschrock 32599653d4eSeschrock if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) { 32699653d4eSeschrock free(ne); 32799653d4eSeschrock return (-1); 32899653d4eSeschrock } 329fa9e4066Sahrens 330fa9e4066Sahrens ne->ne_guid = vdev_guid; 331fa9e4066Sahrens ne->ne_next = pl->names; 332fa9e4066Sahrens pl->names = ne; 33399653d4eSeschrock 33499653d4eSeschrock return (0); 335fa9e4066Sahrens } 336fa9e4066Sahrens 337eaca9bbdSeschrock /* 338eaca9bbdSeschrock * Returns true if the named pool matches the given GUID. 
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	/* Open without reporting errors; failure here is a library error. */
	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	/* No pool of that name exists on this system. */
	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	/* Active only if the open pool has the same GUID we derived. */
	*isactive = (theguid == guid);
	return (0);
}

/*
 * Ask the kernel to validate and fill out our derived config via the
 * ZFS_IOC_POOL_TRYIMPORT ioctl.  Returns a newly allocated nvlist that the
 * caller must free, or NULL on error (with the error already reported).
 */
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	/* Start with a destination buffer twice the packed config size. */
	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	/* Grow the destination buffer until the kernel's reply fits. */
	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 *
 * Returns an nvlist mapping pool name -> config (caller frees), or NULL on
 * error with the error already reported via no_memory()/pool_active().
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares;
	uint_t i, nspares;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;		/* size of 'child', for error cleanup */
	nvlist_t **child = NULL;	/* toplevel vdevs indexed by vdev id */
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				/*
				 * hostid is optional in the label; reset it
				 * each iteration so a previous pool's value
				 * can't bleed into this one.
				 */
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					/* hostname accompanies hostid */
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array,
			 * growing the array if the vdev id is beyond its
			 * current end.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/* nvlist_add_nvlist_array copied the children; free ours. */
		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		/* Let the kernel validate and complete the derived config. */
		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 * (The tryimport above may have overwritten the hostid and
		 * hostname with this system's values.)
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		nvlist_free(config);
		config = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.  Labels 0 and 1 sit at the front of
 * the device, labels 2 and 3 at the aligned end.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  On return *config is the
 * unpacked label (caller frees) or NULL if no valid label was found.
 * Returns 0 in both cases; -1 only on allocation failure.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	/* Labels are placed relative to the label-aligned device size. */
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	/* Try each of the four labels; accept the first valid one. */
	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		/* Reject labels with a missing or out-of-range pool state. */
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_SPARE) {
			nvlist_free(*config);
			continue;
		}

		/*
		 * A txg of 0 marks a half-initialized label; spares are the
		 * exception and carry no txg.
		 */
		if (state != POOL_STATE_SPARE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 */
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;


	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		/* Only absolute paths are accepted as search directories. */
		if (argv[i][0] != '/') {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		if ((dirp = opendir(argv[i])) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {

			(void) snprintf(path, sizeof (path), "%s/%s",
			    argv[i], dp->d_name);

			/* Skip entries we cannot stat. */
			if (stat64(path, &statbuf) != 0)
				continue;

			/*
			 * Ignore directories (which includes "." and "..").
			 */
			if (S_ISDIR(statbuf.st_mode))
				continue;

			/*
			 * Ignore special files; only regular files and block
			 * devices can hold a label.
			 */
			if (!S_ISREG(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))
				continue;

			if ((fd = open64(path, O_RDONLY)) < 0)
				continue;

			/* zpool_read_label() fails only on allocation. */
			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			/* add_config() takes ownership of 'config'. */
			if (config != NULL)
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools);

error:
	/* Tear down the intermediate pool/vdev/config/name lists. */
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}
875fa9e4066Sahrens 876*2f8aaab3Seschrock /* 877*2f8aaab3Seschrock * Given a cache file, return the contents as a list of importable pools. 878*2f8aaab3Seschrock */ 879*2f8aaab3Seschrock nvlist_t * 880*2f8aaab3Seschrock zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile) 881*2f8aaab3Seschrock { 882*2f8aaab3Seschrock char *buf; 883*2f8aaab3Seschrock int fd; 884*2f8aaab3Seschrock struct stat64 statbuf; 885*2f8aaab3Seschrock nvlist_t *raw, *src, *dst; 886*2f8aaab3Seschrock nvlist_t *pools; 887*2f8aaab3Seschrock nvpair_t *elem; 888*2f8aaab3Seschrock char *name; 889*2f8aaab3Seschrock uint64_t guid; 890*2f8aaab3Seschrock boolean_t active; 891*2f8aaab3Seschrock 892*2f8aaab3Seschrock if ((fd = open(cachefile, O_RDONLY)) < 0) { 893*2f8aaab3Seschrock zfs_error_aux(hdl, "%s", strerror(errno)); 894*2f8aaab3Seschrock (void) zfs_error(hdl, EZFS_BADCACHE, 895*2f8aaab3Seschrock dgettext(TEXT_DOMAIN, "failed to open cache file")); 896*2f8aaab3Seschrock return (NULL); 897*2f8aaab3Seschrock } 898*2f8aaab3Seschrock 899*2f8aaab3Seschrock if (fstat64(fd, &statbuf) != 0) { 900*2f8aaab3Seschrock zfs_error_aux(hdl, "%s", strerror(errno)); 901*2f8aaab3Seschrock (void) close(fd); 902*2f8aaab3Seschrock (void) zfs_error(hdl, EZFS_BADCACHE, 903*2f8aaab3Seschrock dgettext(TEXT_DOMAIN, "failed to get size of cache file")); 904*2f8aaab3Seschrock return (NULL); 905*2f8aaab3Seschrock } 906*2f8aaab3Seschrock 907*2f8aaab3Seschrock if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) { 908*2f8aaab3Seschrock (void) close(fd); 909*2f8aaab3Seschrock return (NULL); 910*2f8aaab3Seschrock } 911*2f8aaab3Seschrock 912*2f8aaab3Seschrock if (read(fd, buf, statbuf.st_size) != statbuf.st_size) { 913*2f8aaab3Seschrock (void) close(fd); 914*2f8aaab3Seschrock free(buf); 915*2f8aaab3Seschrock (void) zfs_error(hdl, EZFS_BADCACHE, 916*2f8aaab3Seschrock dgettext(TEXT_DOMAIN, 917*2f8aaab3Seschrock "failed to read cache file contents")); 918*2f8aaab3Seschrock return (NULL); 919*2f8aaab3Seschrock } 
920*2f8aaab3Seschrock 921*2f8aaab3Seschrock (void) close(fd); 922*2f8aaab3Seschrock 923*2f8aaab3Seschrock if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) { 924*2f8aaab3Seschrock free(buf); 925*2f8aaab3Seschrock (void) zfs_error(hdl, EZFS_BADCACHE, 926*2f8aaab3Seschrock dgettext(TEXT_DOMAIN, 927*2f8aaab3Seschrock "invalid or corrupt cache file contents")); 928*2f8aaab3Seschrock return (NULL); 929*2f8aaab3Seschrock } 930*2f8aaab3Seschrock 931*2f8aaab3Seschrock free(buf); 932*2f8aaab3Seschrock 933*2f8aaab3Seschrock /* 934*2f8aaab3Seschrock * Go through and get the current state of the pools and refresh their 935*2f8aaab3Seschrock * state. 936*2f8aaab3Seschrock */ 937*2f8aaab3Seschrock if (nvlist_alloc(&pools, 0, 0) != 0) { 938*2f8aaab3Seschrock (void) no_memory(hdl); 939*2f8aaab3Seschrock nvlist_free(raw); 940*2f8aaab3Seschrock return (NULL); 941*2f8aaab3Seschrock } 942*2f8aaab3Seschrock 943*2f8aaab3Seschrock elem = NULL; 944*2f8aaab3Seschrock while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) { 945*2f8aaab3Seschrock verify(nvpair_value_nvlist(elem, &src) == 0); 946*2f8aaab3Seschrock 947*2f8aaab3Seschrock verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME, 948*2f8aaab3Seschrock &name) == 0); 949*2f8aaab3Seschrock verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID, 950*2f8aaab3Seschrock &guid) == 0); 951*2f8aaab3Seschrock 952*2f8aaab3Seschrock if (pool_active(hdl, name, guid, &active) != 0) { 953*2f8aaab3Seschrock nvlist_free(raw); 954*2f8aaab3Seschrock nvlist_free(pools); 955*2f8aaab3Seschrock return (NULL); 956*2f8aaab3Seschrock } 957*2f8aaab3Seschrock 958*2f8aaab3Seschrock if (active) 959*2f8aaab3Seschrock continue; 960*2f8aaab3Seschrock 961*2f8aaab3Seschrock if ((dst = refresh_config(hdl, src)) == NULL) { 962*2f8aaab3Seschrock nvlist_free(raw); 963*2f8aaab3Seschrock nvlist_free(pools); 964*2f8aaab3Seschrock return (NULL); 965*2f8aaab3Seschrock } 966*2f8aaab3Seschrock 967*2f8aaab3Seschrock if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 
0) { 968*2f8aaab3Seschrock (void) no_memory(hdl); 969*2f8aaab3Seschrock nvlist_free(dst); 970*2f8aaab3Seschrock nvlist_free(raw); 971*2f8aaab3Seschrock nvlist_free(pools); 972*2f8aaab3Seschrock return (NULL); 973*2f8aaab3Seschrock } 974*2f8aaab3Seschrock 975*2f8aaab3Seschrock nvlist_free(dst); 976*2f8aaab3Seschrock } 977*2f8aaab3Seschrock 978*2f8aaab3Seschrock nvlist_free(raw); 979*2f8aaab3Seschrock return (pools); 980*2f8aaab3Seschrock } 981*2f8aaab3Seschrock 982*2f8aaab3Seschrock 98399653d4eSeschrock boolean_t 984fa9e4066Sahrens find_guid(nvlist_t *nv, uint64_t guid) 985fa9e4066Sahrens { 986fa9e4066Sahrens uint64_t tmp; 987fa9e4066Sahrens nvlist_t **child; 988fa9e4066Sahrens uint_t c, children; 989fa9e4066Sahrens 990fa9e4066Sahrens verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0); 991fa9e4066Sahrens if (tmp == guid) 99299653d4eSeschrock return (B_TRUE); 993fa9e4066Sahrens 994fa9e4066Sahrens if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 995fa9e4066Sahrens &child, &children) == 0) { 996fa9e4066Sahrens for (c = 0; c < children; c++) 997fa9e4066Sahrens if (find_guid(child[c], guid)) 99899653d4eSeschrock return (B_TRUE); 99999653d4eSeschrock } 100099653d4eSeschrock 100199653d4eSeschrock return (B_FALSE); 100299653d4eSeschrock } 100399653d4eSeschrock 100499653d4eSeschrock typedef struct spare_cbdata { 100599653d4eSeschrock uint64_t cb_guid; 100699653d4eSeschrock zpool_handle_t *cb_zhp; 100799653d4eSeschrock } spare_cbdata_t; 100899653d4eSeschrock 100999653d4eSeschrock static int 101099653d4eSeschrock find_spare(zpool_handle_t *zhp, void *data) 101199653d4eSeschrock { 101299653d4eSeschrock spare_cbdata_t *cbp = data; 101399653d4eSeschrock nvlist_t **spares; 101499653d4eSeschrock uint_t i, nspares; 101599653d4eSeschrock uint64_t guid; 101699653d4eSeschrock nvlist_t *nvroot; 101799653d4eSeschrock 101899653d4eSeschrock verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 101999653d4eSeschrock &nvroot) == 0); 102099653d4eSeschrock 
102199653d4eSeschrock if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 102299653d4eSeschrock &spares, &nspares) == 0) { 102399653d4eSeschrock for (i = 0; i < nspares; i++) { 102499653d4eSeschrock verify(nvlist_lookup_uint64(spares[i], 102599653d4eSeschrock ZPOOL_CONFIG_GUID, &guid) == 0); 102699653d4eSeschrock if (guid == cbp->cb_guid) { 102799653d4eSeschrock cbp->cb_zhp = zhp; 102899653d4eSeschrock return (1); 102999653d4eSeschrock } 103099653d4eSeschrock } 1031fa9e4066Sahrens } 1032fa9e4066Sahrens 103399653d4eSeschrock zpool_close(zhp); 103499653d4eSeschrock return (0); 1035fa9e4066Sahrens } 1036fa9e4066Sahrens 1037fa9e4066Sahrens /* 103899653d4eSeschrock * Determines if the pool is in use. If so, it returns true and the state of 1039fa9e4066Sahrens * the pool as well as the name of the pool. Both strings are allocated and 1040fa9e4066Sahrens * must be freed by the caller. 1041fa9e4066Sahrens */ 1042fa9e4066Sahrens int 104399653d4eSeschrock zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr, 104499653d4eSeschrock boolean_t *inuse) 1045fa9e4066Sahrens { 1046fa9e4066Sahrens nvlist_t *config; 1047fa9e4066Sahrens char *name; 104899653d4eSeschrock boolean_t ret; 1049fa9e4066Sahrens uint64_t guid, vdev_guid; 1050fa9e4066Sahrens zpool_handle_t *zhp; 1051fa9e4066Sahrens nvlist_t *pool_config; 105239c23413Seschrock uint64_t stateval, isspare; 105399653d4eSeschrock spare_cbdata_t cb = { 0 }; 105494de1d4cSeschrock boolean_t isactive; 105599653d4eSeschrock 105699653d4eSeschrock *inuse = B_FALSE; 1057fa9e4066Sahrens 105899653d4eSeschrock if (zpool_read_label(fd, &config) != 0) { 105999653d4eSeschrock (void) no_memory(hdl); 106099653d4eSeschrock return (-1); 106199653d4eSeschrock } 106299653d4eSeschrock 106399653d4eSeschrock if (config == NULL) 106499653d4eSeschrock return (0); 1065fa9e4066Sahrens 1066fa9e4066Sahrens verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, 106746a2abf2Seschrock &stateval) == 0); 1068fa9e4066Sahrens 
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, 1069fa9e4066Sahrens &vdev_guid) == 0); 1070fa9e4066Sahrens 107199653d4eSeschrock if (stateval != POOL_STATE_SPARE) { 107299653d4eSeschrock verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 107399653d4eSeschrock &name) == 0); 107499653d4eSeschrock verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 107599653d4eSeschrock &guid) == 0); 107699653d4eSeschrock } 107799653d4eSeschrock 107846a2abf2Seschrock switch (stateval) { 1079fa9e4066Sahrens case POOL_STATE_EXPORTED: 108099653d4eSeschrock ret = B_TRUE; 1081fa9e4066Sahrens break; 1082fa9e4066Sahrens 1083fa9e4066Sahrens case POOL_STATE_ACTIVE: 1084fa9e4066Sahrens /* 1085fa9e4066Sahrens * For an active pool, we have to determine if it's really part 1086eaca9bbdSeschrock * of a currently active pool (in which case the pool will exist 1087eaca9bbdSeschrock * and the guid will be the same), or whether it's part of an 1088eaca9bbdSeschrock * active pool that was disconnected without being explicitly 1089eaca9bbdSeschrock * exported. 1090fa9e4066Sahrens */ 109194de1d4cSeschrock if (pool_active(hdl, name, guid, &isactive) != 0) { 109294de1d4cSeschrock nvlist_free(config); 109394de1d4cSeschrock return (-1); 109494de1d4cSeschrock } 109594de1d4cSeschrock 109694de1d4cSeschrock if (isactive) { 1097fa9e4066Sahrens /* 1098fa9e4066Sahrens * Because the device may have been removed while 1099fa9e4066Sahrens * offlined, we only report it as active if the vdev is 1100fa9e4066Sahrens * still present in the config. Otherwise, pretend like 1101fa9e4066Sahrens * it's not in use. 
1102fa9e4066Sahrens */ 110399653d4eSeschrock if ((zhp = zpool_open_canfail(hdl, name)) != NULL && 1104088e9d47Seschrock (pool_config = zpool_get_config(zhp, NULL)) 1105088e9d47Seschrock != NULL) { 1106fa9e4066Sahrens nvlist_t *nvroot; 1107fa9e4066Sahrens 1108fa9e4066Sahrens verify(nvlist_lookup_nvlist(pool_config, 1109fa9e4066Sahrens ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 111046a2abf2Seschrock ret = find_guid(nvroot, vdev_guid); 1111fa9e4066Sahrens } else { 111299653d4eSeschrock ret = B_FALSE; 1113fa9e4066Sahrens } 111499653d4eSeschrock 111539c23413Seschrock /* 111639c23413Seschrock * If this is an active spare within another pool, we 111739c23413Seschrock * treat it like an unused hot spare. This allows the 111839c23413Seschrock * user to create a pool with a hot spare that currently 111939c23413Seschrock * in use within another pool. Since we return B_TRUE, 112039c23413Seschrock * libdiskmgt will continue to prevent generic consumers 112139c23413Seschrock * from using the device. 112239c23413Seschrock */ 112339c23413Seschrock if (ret && nvlist_lookup_uint64(config, 112439c23413Seschrock ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare) 112539c23413Seschrock stateval = POOL_STATE_SPARE; 112639c23413Seschrock 112799653d4eSeschrock if (zhp != NULL) 112899653d4eSeschrock zpool_close(zhp); 1129fa9e4066Sahrens } else { 113046a2abf2Seschrock stateval = POOL_STATE_POTENTIALLY_ACTIVE; 113199653d4eSeschrock ret = B_TRUE; 113299653d4eSeschrock } 113399653d4eSeschrock break; 113499653d4eSeschrock 113599653d4eSeschrock case POOL_STATE_SPARE: 113699653d4eSeschrock /* 113799653d4eSeschrock * For a hot spare, it can be either definitively in use, or 113899653d4eSeschrock * potentially active. To determine if it's in use, we iterate 113999653d4eSeschrock * over all pools in the system and search for one with a spare 114099653d4eSeschrock * with a matching guid. 
114199653d4eSeschrock * 114299653d4eSeschrock * Due to the shared nature of spares, we don't actually report 114399653d4eSeschrock * the potentially active case as in use. This means the user 114499653d4eSeschrock * can freely create pools on the hot spares of exported pools, 114599653d4eSeschrock * but to do otherwise makes the resulting code complicated, and 114699653d4eSeschrock * we end up having to deal with this case anyway. 114799653d4eSeschrock */ 114899653d4eSeschrock cb.cb_zhp = NULL; 114999653d4eSeschrock cb.cb_guid = vdev_guid; 115099653d4eSeschrock if (zpool_iter(hdl, find_spare, &cb) == 1) { 115199653d4eSeschrock name = (char *)zpool_get_name(cb.cb_zhp); 1152fa9e4066Sahrens ret = TRUE; 115399653d4eSeschrock } else { 115499653d4eSeschrock ret = FALSE; 1155fa9e4066Sahrens } 1156fa9e4066Sahrens break; 1157fa9e4066Sahrens 1158fa9e4066Sahrens default: 115999653d4eSeschrock ret = B_FALSE; 1160fa9e4066Sahrens } 1161fa9e4066Sahrens 116246a2abf2Seschrock 116346a2abf2Seschrock if (ret) { 116499653d4eSeschrock if ((*namestr = zfs_strdup(hdl, name)) == NULL) { 116599653d4eSeschrock nvlist_free(config); 116699653d4eSeschrock return (-1); 116799653d4eSeschrock } 116846a2abf2Seschrock *state = (pool_state_t)stateval; 116946a2abf2Seschrock } 117046a2abf2Seschrock 117199653d4eSeschrock if (cb.cb_zhp) 117299653d4eSeschrock zpool_close(cb.cb_zhp); 117399653d4eSeschrock 1174fa9e4066Sahrens nvlist_free(config); 117599653d4eSeschrock *inuse = ret; 117699653d4eSeschrock return (0); 1177fa9e4066Sahrens } 1178