libzfs_import.c revision 24e697d414a4df0377b91a2875f029e7b5f97247
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
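 *
 * As a purely illustrative example (the guids and txgs below are made up),
 * a two-vdev pool whose labels were written at txg 100 and again at txg 104
 * would be organized as:
 *
 *	pool guid 0x1111
 *	    toplevel vdev guid 0xaaaa -> { txg 100, txg 104 }
 *	    toplevel vdev guid 0xbbbb -> { txg 100, txg 104 }
 *
 * and the txg 104 config would be chosen for each toplevel vdev.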
 */

#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;

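/*
 * Return the devid string (devid plus minor name) for the device at 'path',
 * or NULL if no devid can be obtained.  The returned string must be freed by
 * the caller with devid_str_free().
 */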
static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the named pool is currently active and has the given
 * GUID; the result is returned in 'isactive'.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

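/*
 * Ask the kernel to try importing the given config (ZFS_IOC_POOL_TRYIMPORT)
 * and return the refreshed configuration it hands back, or NULL on failure.
 */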
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		(void) zpool_standard_error(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot discover pools"));
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t version, guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state;

				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION, &version) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid) != 0)
					goto nomem;
				verify(nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME, &name) == 0);
				if (nvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name) != 0)
					goto nomem;
				verify(nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE, &state) == 0);
				if (nvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state) != 0)
					goto nomem;
				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					if (nvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid) != 0)
						goto nomem;
					verify(nvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME,
					    &hostname) == 0);
					if (nvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME,
					    hostname) != 0)
						goto nomem;
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);
			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++)
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL)
			goto error;

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
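 *
 * As an illustration (assuming the usual 256K vdev_label_t and four labels):
 * labels 0 and 1 live at offsets 0 and 256K from the start of the device,
 * while labels 2 and 3 live at size-512K and size-256K, i.e. at the end.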
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
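 *
 * A minimal, illustrative use, assuming 'path' names a disk slice or file:
 *
 *	nvlist_t *config;
 *	int fd = open64(path, O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *	    config != NULL) {
 *		(process the label config)
 *		nvlist_free(config);
 *	}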
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev/dsk) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, int argc, char **argv,
    boolean_t active_ok, char *poolname, uint64_t guid)
{
	int i;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end;
	size_t pathleft;
	struct stat64 statbuf;
	nvlist_t *ret = NULL, *config;
	static char *default_dir = "/dev/dsk";
	int fd;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;

	verify(poolname == NULL || guid == 0);

	if (argc == 0) {
		argc = 1;
		argv = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < argc; i++) {
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(argv[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    argv[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/rdsk/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
				continue;

			/*
			 * Ignore failed stats.  We only want regular
			 * files, character devs and block devs.
			 */
			if (fstat64(fd, &statbuf) != 0 ||
			    (!S_ISREG(statbuf.st_mode) &&
			    !S_ISCHR(statbuf.st_mode) &&
			    !S_ISBLK(statbuf.st_mode))) {
				(void) close(fd);
				continue;
			}

			if ((zpool_read_label(fd, &config)) != 0) {
				(void) close(fd);
				(void) no_memory(hdl);
				goto error;
			}

			(void) close(fd);

			if (config != NULL) {
				boolean_t matched = B_TRUE;

				if (poolname != NULL) {
					char *pname;
					verify(nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0);
					if (strcmp(poolname, pname) != 0)
						matched = B_FALSE;
				} else if (guid != 0) {
					uint64_t this_guid;
					verify(nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0);
					if (guid != this_guid)
						matched = B_FALSE;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
		}

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, active_ok);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

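/*
 * Public wrappers around zpool_find_import_impl().  Each searches the given
 * directories (or /dev/dsk by default), optionally restricting the results to
 * a single pool name or GUID, or allowing active pools to be returned.
 */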
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, 0));
}

nvlist_t *
zpool_find_import_byname(libzfs_handle_t *hdl, int argc, char **argv,
    char *pool)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, pool, 0));
}

nvlist_t *
zpool_find_import_byguid(libzfs_handle_t *hdl, int argc, char **argv,
    uint64_t guid)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_FALSE, NULL, guid));
}

nvlist_t *
zpool_find_import_activeok(libzfs_handle_t *hdl, int argc, char **argv)
{
	return (zpool_find_import_impl(hdl, argc, argv, B_TRUE, NULL, 0));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
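 *
 * For illustration, a caller might pass the default cache file:
 *
 *	nvlist_t *pools = zpool_find_import_cached(hdl,
 *	    "/etc/zfs/zpool.cache", B_FALSE, NULL, 0);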
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    boolean_t active_ok, char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through the cached configs and refresh the state of each
	 * discovered pool.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (!active_ok) {
			if (pool_active(hdl, name, this_guid, &active) != 0) {
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}

			if (active)
				continue;

			if ((dst = refresh_config(hdl, src)) == NULL) {
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}

			if (nvlist_add_nvlist(pools, nvpair_name(elem), dst)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(dst);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
			nvlist_free(dst);
		} else {
			if (nvlist_add_nvlist(pools, nvpair_name(elem), src)
			    != 0) {
				(void) no_memory(hdl);
				nvlist_free(raw);
				nvlist_free(pools);
				return (NULL);
			}
		}
	}

	nvlist_free(raw);
	return (pools);
}

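/*
 * Walk a vdev tree, returning B_TRUE if any vdev in it (including the root)
 * has the given GUID.
 */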
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

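/*
 * zpool_iter() callback: check whether the pool's aux device list named by
 * cb_type (spares or l2cache) contains a device with guid cb_guid.  On a
 * match, the pool handle is kept open in cb_zhp and iteration stops.
 */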
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use.  If so, it returns true and the state of
 * the pool as well as the name of the pool.  The name string is allocated and
 * must be freed by the caller.
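 *
 * Illustrative use, given an open file descriptor for a candidate device:
 *
 *	pool_state_t state;
 *	char *name;
 *	boolean_t inuse;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(warn that the device belongs to pool 'name' in state 'state')
 *		free(name);
 *	}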
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated, and
		 * we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
1323