xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision 15e6edf145a9c2bb0e0272cf8debe823bb97529b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
52 
53 /*
54  * ====================================================================
55  *   zpool property functions
56  * ====================================================================
57  */
58 
59 static int
60 zpool_get_all_props(zpool_handle_t *zhp)
61 {
62 	zfs_cmd_t zc = { 0 };
63 	libzfs_handle_t *hdl = zhp->zpool_hdl;
64 
65 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
66 
67 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
68 		return (-1);
69 
70 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
71 		if (errno == ENOMEM) {
72 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
73 				zcmd_free_nvlists(&zc);
74 				return (-1);
75 			}
76 		} else {
77 			zcmd_free_nvlists(&zc);
78 			return (-1);
79 		}
80 	}
81 
82 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
83 		zcmd_free_nvlists(&zc);
84 		return (-1);
85 	}
86 
87 	zcmd_free_nvlists(&zc);
88 
89 	return (0);
90 }
91 
92 static int
93 zpool_props_refresh(zpool_handle_t *zhp)
94 {
95 	nvlist_t *old_props;
96 
97 	old_props = zhp->zpool_props;
98 
99 	if (zpool_get_all_props(zhp) != 0)
100 		return (-1);
101 
102 	nvlist_free(old_props);
103 	return (0);
104 }
105 
106 static char *
107 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
108     zprop_source_t *src)
109 {
110 	nvlist_t *nv, *nvl;
111 	uint64_t ival;
112 	char *value;
113 	zprop_source_t source;
114 
115 	nvl = zhp->zpool_props;
116 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
117 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
118 		source = ival;
119 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
120 	} else {
121 		source = ZPROP_SRC_DEFAULT;
122 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
123 			value = "-";
124 	}
125 
126 	if (src)
127 		*src = source;
128 
129 	return (value);
130 }
131 
132 uint64_t
133 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
134 {
135 	nvlist_t *nv, *nvl;
136 	uint64_t value;
137 	zprop_source_t source;
138 
139 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
140 		return (zpool_prop_default_numeric(prop));
141 
142 	nvl = zhp->zpool_props;
143 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
144 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
145 		source = value;
146 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
147 	} else {
148 		source = ZPROP_SRC_DEFAULT;
149 		value = zpool_prop_default_numeric(prop);
150 	}
151 
152 	if (src)
153 		*src = source;
154 
155 	return (value);
156 }
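
/*
 * Illustrative sketch (not part of the original source): reading the
 * cached pool version together with its source.  The pool name "tank"
 * is an assumption for the example.
 *
 *	zprop_source_t src;
 *	uint64_t vers;
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		vers = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
 *		(void) printf("version %llu (%s)\n", (u_longlong_t)vers,
 *		    src == ZPROP_SRC_DEFAULT ? "default" : "local");
 *		zpool_close(zhp);
 *	}
 */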
157 
158 /*
159  * Map a vdev state to a printed string.
160  */
161 char *
162 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
163 {
164 	switch (state) {
165 	case VDEV_STATE_CLOSED:
166 	case VDEV_STATE_OFFLINE:
167 		return (gettext("OFFLINE"));
168 	case VDEV_STATE_REMOVED:
169 		return (gettext("REMOVED"));
170 	case VDEV_STATE_CANT_OPEN:
171 		if (aux == VDEV_AUX_CORRUPT_DATA)
172 			return (gettext("FAULTED"));
173 		else
174 			return (gettext("UNAVAIL"));
175 	case VDEV_STATE_FAULTED:
176 		return (gettext("FAULTED"));
177 	case VDEV_STATE_DEGRADED:
178 		return (gettext("DEGRADED"));
179 	case VDEV_STATE_HEALTHY:
180 		return (gettext("ONLINE"));
181 	}
182 
183 	return (gettext("UNKNOWN"));
184 }
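
/*
 * Illustrative sketch (not part of the original source): rendering the
 * state of a vdev given its stats array, much as zpool_get_prop() does
 * for the "health" property below.  'nvroot' is assumed to have come
 * from the pool config.
 *
 *	vdev_stat_t *vs;
 *	uint_t vsc;
 *
 *	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
 *	    (uint64_t **)&vs, &vsc) == 0);
 *	(void) puts(zpool_state_to_name(vs->vs_state, vs->vs_aux));
 */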
185 
186 /*
187  * Get a zpool property value for 'prop' and return the value in
188  * a pre-allocated buffer.
189  */
190 int
191 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
192     zprop_source_t *srctype)
193 {
194 	uint64_t intval;
195 	const char *strval;
196 	zprop_source_t src = ZPROP_SRC_NONE;
197 	nvlist_t *nvroot;
198 	vdev_stat_t *vs;
199 	uint_t vsc;
200 
201 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
202 		if (prop == ZPOOL_PROP_NAME)
203 			(void) strlcpy(buf, zpool_get_name(zhp), len);
204 		else if (prop == ZPOOL_PROP_HEALTH)
205 			(void) strlcpy(buf, "FAULTED", len);
206 		else
207 			(void) strlcpy(buf, "-", len);
208 		return (0);
209 	}
210 
211 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
212 	    prop != ZPOOL_PROP_NAME)
213 		return (-1);
214 
215 	switch (zpool_prop_get_type(prop)) {
216 	case PROP_TYPE_STRING:
217 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
218 		    len);
219 		break;
220 
221 	case PROP_TYPE_NUMBER:
222 		intval = zpool_get_prop_int(zhp, prop, &src);
223 
224 		switch (prop) {
225 		case ZPOOL_PROP_SIZE:
226 		case ZPOOL_PROP_USED:
227 		case ZPOOL_PROP_AVAILABLE:
228 			(void) zfs_nicenum(intval, buf, len);
229 			break;
230 
231 		case ZPOOL_PROP_CAPACITY:
232 			(void) snprintf(buf, len, "%llu%%",
233 			    (u_longlong_t)intval);
234 			break;
235 
236 		case ZPOOL_PROP_HEALTH:
237 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
238 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
239 			verify(nvlist_lookup_uint64_array(nvroot,
240 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
241 
242 			(void) strlcpy(buf, zpool_state_to_name(intval,
243 			    vs->vs_aux), len);
244 			break;
245 		default:
246 			(void) snprintf(buf, len, "%llu", intval);
247 		}
248 		break;
249 
250 	case PROP_TYPE_INDEX:
251 		intval = zpool_get_prop_int(zhp, prop, &src);
252 		if (zpool_prop_index_to_string(prop, intval, &strval)
253 		    != 0)
254 			return (-1);
255 		(void) strlcpy(buf, strval, len);
256 		break;
257 
258 	default:
259 		abort();
260 	}
261 
262 	if (srctype)
263 		*srctype = src;
264 
265 	return (0);
266 }
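
/*
 * Illustrative sketch (not part of the original source): given an open
 * handle 'zhp', fetching the "health" property as a display string.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf,
 *	    sizeof (buf), NULL) == 0)
 *		(void) printf("health: %s\n", buf);
 */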
267 
268 /*
269  * Check that the bootfs name has the same pool name as the pool it is
270  * being set on.  'bootfs' is assumed to be a valid dataset name.
271  */
272 static boolean_t
273 bootfs_name_valid(const char *pool, char *bootfs)
274 {
275 	int len = strlen(pool);
276 
277 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM))
278 		return (B_FALSE);
279 
280 	if (strncmp(pool, bootfs, len) == 0 &&
281 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
282 		return (B_TRUE);
283 
284 	return (B_FALSE);
285 }
286 
287 /*
288  * Inspect the configuration to determine if any of the devices contain
289  * an EFI label.
290  */
291 static boolean_t
292 pool_uses_efi(nvlist_t *config)
293 {
294 	nvlist_t **child;
295 	uint_t c, children;
296 
297 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
298 	    &child, &children) != 0)
299 		return (read_efi_label(config, NULL) >= 0);
300 
301 	for (c = 0; c < children; c++) {
302 		if (pool_uses_efi(child[c]))
303 			return (B_TRUE);
304 	}
305 	return (B_FALSE);
306 }
307 
308 /*
309  * Given an nvlist of zpool properties to be set, validate that they are
310  * correct, and parse any numeric properties (index, boolean, etc.) if they are
311  * specified as strings.
312  */
313 static nvlist_t *
314 zpool_validate_properties(libzfs_handle_t *hdl, const char *poolname,
315     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
316 {
317 	nvpair_t *elem;
318 	nvlist_t *retprops;
319 	zpool_prop_t prop;
320 	char *strval;
321 	uint64_t intval;
322 	char *slash;
323 	struct stat64 statbuf;
324 	zpool_handle_t *zhp;
325 	nvlist_t *nvroot;
326 
327 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
328 		(void) no_memory(hdl);
329 		return (NULL);
330 	}
331 
332 	elem = NULL;
333 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
334 		const char *propname = nvpair_name(elem);
335 
336 		/*
337 		 * Make sure this property is valid and applies to this type.
338 		 */
339 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
340 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
341 			    "invalid property '%s'"), propname);
342 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
343 			goto error;
344 		}
345 
346 		if (zpool_prop_readonly(prop)) {
347 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
348 			    "is readonly"), propname);
349 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
350 			goto error;
351 		}
352 
353 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
354 		    &strval, &intval, errbuf) != 0)
355 			goto error;
356 
357 		/*
358 		 * Perform additional checking for specific properties.
359 		 */
360 		switch (prop) {
361 		case ZPOOL_PROP_VERSION:
362 			if (intval < version || intval > SPA_VERSION) {
363 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
364 				    "property '%s' number %d is invalid."),
365 				    "property '%s' number %llu is invalid."),
366 				    propname, (u_longlong_t)intval);
367 				goto error;
368 			}
369 			break;
370 
371 		case ZPOOL_PROP_BOOTFS:
372 			if (create_or_import) {
373 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
374 				    "property '%s' cannot be set at creation "
375 				    "or import time"), propname);
376 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
377 				goto error;
378 			}
379 
380 			if (version < SPA_VERSION_BOOTFS) {
381 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
382 				    "pool must be upgraded to support "
383 				    "'%s' property"), propname);
384 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
385 				goto error;
386 			}
387 
388 			/*
389 			 * The bootfs property value has to be a dataset name,
390 			 * and the dataset has to be in the pool it is set on.
391 			 */
392 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
393 			    strval)) {
394 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
395 				    "is an invalid name"), strval);
396 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
397 				goto error;
398 			}
399 
400 			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
401 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
402 				    "could not open pool '%s'"), poolname);
403 				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
404 				goto error;
405 			}
406 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
407 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
408 
409 			/*
410 			 * The bootfs property cannot be set if any disk in
411 			 * the pool is EFI labeled.
412 			 */
413 			if (pool_uses_efi(nvroot)) {
414 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
415 				    "property '%s' not supported on "
416 				    "EFI labeled devices"), propname);
417 				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
418 				zpool_close(zhp);
419 				goto error;
420 			}
421 			zpool_close(zhp);
422 			break;
423 
424 		case ZPOOL_PROP_ALTROOT:
425 			if (!create_or_import) {
426 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
427 				    "property '%s' can only be set during pool "
428 				    "creation or import"), propname);
429 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
430 				goto error;
431 			}
432 
433 			if (strval[0] != '/') {
434 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
435 				    "bad alternate root '%s'"), strval);
436 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
437 				goto error;
438 			}
439 			break;
440 
441 		case ZPOOL_PROP_CACHEFILE:
442 			if (strval[0] == '\0')
443 				break;
444 
445 			if (strcmp(strval, "none") == 0)
446 				break;
447 
448 			if (strval[0] != '/') {
449 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
450 				    "property '%s' must be empty, an "
451 				    "absolute path, or 'none'"), propname);
452 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
453 				goto error;
454 			}
455 
456 			slash = strrchr(strval, '/');
457 
458 			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
459 			    strcmp(slash, "/..") == 0) {
460 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
461 				    "'%s' is not a valid file"), strval);
462 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
463 				goto error;
464 			}
465 
466 			*slash = '\0';
467 
468 			if (strval[0] != '\0' &&
469 			    (stat64(strval, &statbuf) != 0 ||
470 			    !S_ISDIR(statbuf.st_mode))) {
471 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
472 				    "'%s' is not a valid directory"),
473 				    strval);
474 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
475 				goto error;
476 			}
477 
478 			*slash = '/';
479 			break;
480 		}
481 	}
482 
483 	return (retprops);
484 error:
485 	nvlist_free(retprops);
486 	return (NULL);
487 }
488 
489 /*
490  * Set zpool property : propname=propval.
491  */
492 int
493 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
494 {
495 	zfs_cmd_t zc = { 0 };
496 	int ret = -1;
497 	char errbuf[1024];
498 	nvlist_t *nvl = NULL;
499 	nvlist_t *realprops;
500 	uint64_t version;
501 
502 	(void) snprintf(errbuf, sizeof (errbuf),
503 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
504 	    zhp->zpool_name);
505 
506 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
507 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
508 
509 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
510 		return (no_memory(zhp->zpool_hdl));
511 
512 	if (nvlist_add_string(nvl, propname, propval) != 0) {
513 		nvlist_free(nvl);
514 		return (no_memory(zhp->zpool_hdl));
515 	}
516 
517 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
518 	if ((realprops = zpool_validate_properties(zhp->zpool_hdl,
519 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
520 		nvlist_free(nvl);
521 		return (-1);
522 	}
523 
524 	nvlist_free(nvl);
525 	nvl = realprops;
526 
527 	/*
528 	 * Execute the corresponding ioctl() to set this property.
529 	 */
530 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
531 
532 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
533 		nvlist_free(nvl);
534 		return (-1);
535 	}
536 
537 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
538 
539 	zcmd_free_nvlists(&zc);
540 	nvlist_free(nvl);
541 
542 	if (ret)
543 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
544 	else
545 		(void) zpool_props_refresh(zhp);
546 
547 	return (ret);
548 }
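
/*
 * Illustrative sketch (not part of the original source): disabling the
 * cache file for a pool.  The value "none" is accepted by the
 * ZPOOL_PROP_CACHEFILE checks in zpool_validate_properties() above.
 *
 *	if (zpool_set_prop(zhp, "cachefile", "none") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zhp->zpool_hdl));
 */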
549 
550 int
551 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
552 {
553 	libzfs_handle_t *hdl = zhp->zpool_hdl;
554 	zprop_list_t *entry;
555 	char buf[ZFS_MAXPROPLEN];
556 
557 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
558 		return (-1);
559 
560 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
562 		if (entry->pl_fixed)
563 			continue;
564 
565 		if (entry->pl_prop != ZPROP_INVAL &&
566 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
567 		    NULL) == 0) {
568 			if (strlen(buf) > entry->pl_width)
569 				entry->pl_width = strlen(buf);
570 		}
571 	}
572 
573 	return (0);
574 }
576 
577 /*
578  * Validate the given pool name, optionally recording an extended error
579  * message via 'hdl'.
580  */
581 boolean_t
582 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
583 {
584 	namecheck_err_t why;
585 	char what;
586 	int ret;
587 
588 	ret = pool_namecheck(pool, &why, &what);
589 
590 	/*
591 	 * The rules for reserved pool names were extended at a later point.
592 	 * But we need to support users with existing pools that may now be
593 	 * invalid.  So we only check for this expanded set of names during a
594 	 * create (or import), and only in userland.
595 	 */
596 	if (ret == 0 && !isopen &&
597 	    (strncmp(pool, "mirror", 6) == 0 ||
598 	    strncmp(pool, "raidz", 5) == 0 ||
599 	    strncmp(pool, "spare", 5) == 0 ||
600 	    strcmp(pool, "log") == 0)) {
601 		if (hdl != NULL)
602 			zfs_error_aux(hdl,
603 			    dgettext(TEXT_DOMAIN, "name is reserved"));
604 		return (B_FALSE);
605 	}
607 
608 	if (ret != 0) {
609 		if (hdl != NULL) {
610 			switch (why) {
611 			case NAME_ERR_TOOLONG:
612 				zfs_error_aux(hdl,
613 				    dgettext(TEXT_DOMAIN, "name is too long"));
614 				break;
615 
616 			case NAME_ERR_INVALCHAR:
617 				zfs_error_aux(hdl,
618 				    dgettext(TEXT_DOMAIN, "invalid character "
619 				    "'%c' in pool name"), what);
620 				break;
621 
622 			case NAME_ERR_NOLETTER:
623 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
624 				    "name must begin with a letter"));
625 				break;
626 
627 			case NAME_ERR_RESERVED:
628 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
629 				    "name is reserved"));
630 				break;
631 
632 			case NAME_ERR_DISKLIKE:
633 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
634 				    "pool name is reserved"));
635 				break;
636 
637 			case NAME_ERR_LEADING_SLASH:
638 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
639 				    "leading slash in name"));
640 				break;
641 
642 			case NAME_ERR_EMPTY_COMPONENT:
643 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
644 				    "empty component in name"));
645 				break;
646 
647 			case NAME_ERR_TRAILING_SLASH:
648 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
649 				    "trailing slash in name"));
650 				break;
651 
652 			case NAME_ERR_MULTIPLE_AT:
653 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
654 				    "multiple '@' delimiters in name"));
655 				break;
656 
657 			}
658 		}
659 		return (B_FALSE);
660 	}
661 
662 	return (B_TRUE);
663 }
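
/*
 * Illustrative sketch (not part of the original source): the reserved
 * names are rejected only when creating or importing, not when opening
 * an existing pool.
 *
 *	assert(zpool_name_valid(hdl, B_TRUE, "mirror"));
 *	assert(!zpool_name_valid(hdl, B_FALSE, "mirror"));
 */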
664 
665 /*
666  * Open a handle to the given pool, even if the pool is currently in the FAULTED
667  * state.
668  */
669 zpool_handle_t *
670 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
671 {
672 	zpool_handle_t *zhp;
673 	boolean_t missing;
674 
675 	/*
676 	 * Make sure the pool name is valid.
677 	 */
678 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
679 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
680 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
681 		    pool);
682 		return (NULL);
683 	}
684 
685 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
686 		return (NULL);
687 
688 	zhp->zpool_hdl = hdl;
689 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
690 
691 	if (zpool_refresh_stats(zhp, &missing) != 0) {
692 		zpool_close(zhp);
693 		return (NULL);
694 	}
695 
696 	if (missing) {
697 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
698 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
699 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
700 		zpool_close(zhp);
701 		return (NULL);
702 	}
703 
704 	return (zhp);
705 }
706 
707 /*
708  * Like the above, but silent on error.  Used when iterating over pools (because
709  * the configuration cache may be out of date).
710  */
711 int
712 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
713 {
714 	zpool_handle_t *zhp;
715 	boolean_t missing;
716 
717 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
718 		return (-1);
719 
720 	zhp->zpool_hdl = hdl;
721 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
722 
723 	if (zpool_refresh_stats(zhp, &missing) != 0) {
724 		zpool_close(zhp);
725 		return (-1);
726 	}
727 
728 	if (missing) {
729 		zpool_close(zhp);
730 		*ret = NULL;
731 		return (0);
732 	}
733 
734 	*ret = zhp;
735 	return (0);
736 }
737 
738 /*
739  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
740  * state.
741  */
742 zpool_handle_t *
743 zpool_open(libzfs_handle_t *hdl, const char *pool)
744 {
745 	zpool_handle_t *zhp;
746 
747 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
748 		return (NULL);
749 
750 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
751 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
752 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
753 		zpool_close(zhp);
754 		return (NULL);
755 	}
756 
757 	return (zhp);
758 }
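
/*
 * Illustrative sketch (not part of the original source): the typical
 * open/use/close lifecycle.  The pool name "tank" is an assumption.
 *
 *	libzfs_handle_t *hdl;
 *	zpool_handle_t *zhp;
 *
 *	if ((hdl = libzfs_init()) == NULL)
 *		return (1);
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("state: %d\n", zpool_get_state(zhp));
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */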
759 
760 /*
761  * Close the handle.  Simply frees the memory associated with the handle.
762  */
763 void
764 zpool_close(zpool_handle_t *zhp)
765 {
766 	if (zhp->zpool_config)
767 		nvlist_free(zhp->zpool_config);
768 	if (zhp->zpool_old_config)
769 		nvlist_free(zhp->zpool_old_config);
770 	if (zhp->zpool_props)
771 		nvlist_free(zhp->zpool_props);
772 	free(zhp);
773 }
774 
775 /*
776  * Return the name of the pool.
777  */
778 const char *
779 zpool_get_name(zpool_handle_t *zhp)
780 {
781 	return (zhp->zpool_name);
782 }
784 
785 /*
786  * Return the state of the pool (ACTIVE or UNAVAILABLE)
787  */
788 int
789 zpool_get_state(zpool_handle_t *zhp)
790 {
791 	return (zhp->zpool_state);
792 }
793 
794 /*
795  * Create the named pool, using the provided vdev list.  It is assumed
796  * that the consumer has already validated the contents of the nvlist, so we
797  * don't have to worry about error semantics.
798  */
799 int
800 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
801     nvlist_t *props)
802 {
803 	zfs_cmd_t zc = { 0 };
804 	char msg[1024];
805 	char *altroot;
806 
807 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
808 	    "cannot create '%s'"), pool);
809 
810 	if (!zpool_name_valid(hdl, B_FALSE, pool))
811 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
812 
813 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
814 		return (-1);
815 
816 	if (props && (props = zpool_validate_properties(hdl, pool, props,
817 	    SPA_VERSION_1, B_TRUE, msg)) == NULL)
818 		return (-1);
819 
820 	if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
821 		nvlist_free(props);
822 		return (-1);
823 	}
824 
825 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
826 
827 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
829 		zcmd_free_nvlists(&zc);
830 		nvlist_free(props);
831 
832 		switch (errno) {
833 		case EBUSY:
834 			/*
835 			 * This can happen if the user has specified the same
836 			 * device multiple times.  We can't reliably detect this
837 			 * until we try to add it and see we already have a
838 			 * label.
839 			 */
840 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
841 			    "one or more vdevs refer to the same device"));
842 			return (zfs_error(hdl, EZFS_BADDEV, msg));
843 
844 		case EOVERFLOW:
845 			/*
846 			 * This occurs when one of the devices is below
847 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
848 			 * device was the problem device since there's no
849 			 * reliable way to determine device size from userland.
850 			 */
851 			{
852 				char buf[64];
853 
854 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
855 
856 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
857 				    "one or more devices is less than the "
858 				    "minimum size (%s)"), buf);
859 			}
860 			return (zfs_error(hdl, EZFS_BADDEV, msg));
861 
862 		case ENOSPC:
863 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
864 			    "one or more devices is out of space"));
865 			return (zfs_error(hdl, EZFS_BADDEV, msg));
866 
867 		case ENOTBLK:
868 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
869 			    "cache device must be a disk or disk slice"));
870 			return (zfs_error(hdl, EZFS_BADDEV, msg));
871 
872 		default:
873 			return (zpool_standard_error(hdl, errno, msg));
874 		}
875 	}
876 
877 	/*
878 	 * If this is an alternate root pool, then we automatically set the
879 	 * mountpoint of the root dataset to be '/'.
880 	 */
881 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
882 	    &altroot) == 0) {
883 		zfs_handle_t *zhp;
884 
885 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
886 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
887 		    "/") == 0);
888 
889 		zfs_close(zhp);
890 	}
891 
892 	zcmd_free_nvlists(&zc);
893 	nvlist_free(props);
894 	return (0);
895 }
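
/*
 * Illustrative sketch (not part of the original source): creating a
 * single-slice pool.  The device path is hypothetical, and the
 * nvlist_*() error checking and whole-disk labeling normally done by
 * the zpool(1M) command are elided.
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t0d0s0");
 *
 *	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *
 *	(void) zpool_create(hdl, "tank", nvroot, NULL);
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */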
896 
897 /*
898  * Destroy the given pool.  It is up to the caller to ensure that there are no
899  * datasets left in the pool.
900  */
901 int
902 zpool_destroy(zpool_handle_t *zhp)
903 {
904 	zfs_cmd_t zc = { 0 };
905 	zfs_handle_t *zfp = NULL;
906 	libzfs_handle_t *hdl = zhp->zpool_hdl;
907 	char msg[1024];
908 
909 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
910 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
911 	    ZFS_TYPE_FILESYSTEM)) == NULL)
912 		return (-1);
913 
914 	if (zpool_remove_zvol_links(zhp) != 0)
915 		return (-1);
916 
917 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
918 
919 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
920 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
921 		    "cannot destroy '%s'"), zhp->zpool_name);
922 
923 		if (errno == EROFS) {
924 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
925 			    "one or more devices is read only"));
926 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
927 		} else {
928 			(void) zpool_standard_error(hdl, errno, msg);
929 		}
930 
931 		if (zfp)
932 			zfs_close(zfp);
933 		return (-1);
934 	}
935 
936 	if (zfp) {
937 		remove_mountpoint(zfp);
938 		zfs_close(zfp);
939 	}
940 
941 	return (0);
942 }
943 
944 /*
945  * Add the given vdevs to the pool.  The caller must have already performed the
946  * necessary verification to ensure that the vdev specification is well-formed.
947  */
948 int
949 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
950 {
951 	zfs_cmd_t zc = { 0 };
952 	int ret;
953 	libzfs_handle_t *hdl = zhp->zpool_hdl;
954 	char msg[1024];
955 	nvlist_t **spares, **l2cache;
956 	uint_t nspares, nl2cache;
957 
958 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
959 	    "cannot add to '%s'"), zhp->zpool_name);
960 
961 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
962 	    SPA_VERSION_SPARES &&
963 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
964 	    &spares, &nspares) == 0) {
965 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
966 		    "upgraded to add hot spares"));
967 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
968 	}
969 
970 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
971 	    SPA_VERSION_L2CACHE &&
972 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
973 	    &l2cache, &nl2cache) == 0) {
974 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
975 		    "upgraded to add cache devices"));
976 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
977 	}
978 
979 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
980 		return (-1);
981 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
982 
983 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
984 		switch (errno) {
985 		case EBUSY:
986 			/*
987 			 * This can happen if the user has specified the same
988 			 * device multiple times.  We can't reliably detect this
989 			 * until we try to add it and see we already have a
990 			 * label.
991 			 */
992 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
993 			    "one or more vdevs refer to the same device"));
994 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
995 			break;
996 
997 		case EOVERFLOW:
998 			/*
999 			 * This occurs when one of the devices is below
1000 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
1001 			 * device was the problem device since there's no
1002 			 * reliable way to determine device size from userland.
1003 			 */
1004 			{
1005 				char buf[64];
1006 
1007 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1008 
1009 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1010 				    "device is less than the minimum "
1011 				    "size (%s)"), buf);
1012 			}
1013 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1014 			break;
1015 
1016 		case ENOTSUP:
1017 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1018 			    "pool must be upgraded to add these vdevs"));
1019 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
1020 			break;
1021 
1022 		case EDOM:
1023 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1024 			    "root pool cannot have multiple vdevs"
1025 			    " or separate logs"));
1026 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1027 			break;
1028 
1029 		case ENOTBLK:
1030 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1031 			    "cache device must be a disk or disk slice"));
1032 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
1033 			break;
1034 
1035 		default:
1036 			(void) zpool_standard_error(hdl, errno, msg);
1037 		}
1038 
1039 		ret = -1;
1040 	} else {
1041 		ret = 0;
1042 	}
1043 
1044 	zcmd_free_nvlists(&zc);
1045 
1046 	return (ret);
1047 }
1048 
1049 /*
1050  * Exports the pool from the system.  The caller must ensure that there are no
1051  * mounted datasets in the pool.
1052  */
1053 int
1054 zpool_export(zpool_handle_t *zhp)
1055 {
1056 	zfs_cmd_t zc = { 0 };
1057 
1058 	if (zpool_remove_zvol_links(zhp) != 0)
1059 		return (-1);
1060 
1061 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1062 
1063 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
1064 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1065 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
1066 		    zhp->zpool_name));
1067 	return (0);
1068 }
1069 
1070 /*
1071  * zpool_import() is a contracted interface; it should be kept the same
1072  * if possible.
1073  *
1074  * Applications should use zpool_import_props() to import a pool with
1075  * new property values to be set.
1076  */
1077 int
1078 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1079     char *altroot)
1080 {
1081 	nvlist_t *props = NULL;
1082 	int ret;
1083 
1084 	if (altroot != NULL) {
1085 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1086 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1087 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1088 			    newname));
1089 		}
1090 
1091 		if (nvlist_add_string(props,
1092 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
1093 			nvlist_free(props);
1094 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1095 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1096 			    newname));
1097 		}
1098 	}
1099 
1100 	ret = zpool_import_props(hdl, config, newname, props, B_FALSE);
1101 	if (props)
1102 		nvlist_free(props);
1103 	return (ret);
1104 }
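
/*
 * Illustrative sketch (not part of the original source): importing a
 * discovered pool under an alternate root.  'config' is assumed to
 * have come from the zpool_find_import() family; "/a" is hypothetical.
 *
 *	if (zpool_import(hdl, config, NULL, "/a") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(hdl));
 */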
1105 
1106 /*
1107  * Import the given pool using the known configuration and a list of
1108  * properties to be set. The configuration should have come from
1109  * zpool_find_import(). The 'newname' parameter controls whether the pool
1110  * is imported with a different name.
1111  */
1112 int
1113 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1114     nvlist_t *props, boolean_t importfaulted)
1115 {
1116 	zfs_cmd_t zc = { 0 };
1117 	char *thename;
1118 	char *origname;
1119 	int ret;
1120 	char errbuf[1024];
1121 
1122 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1123 	    &origname) == 0);
1124 
1125 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1126 	    "cannot import pool '%s'"), origname);
1127 
1128 	if (newname != NULL) {
1129 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1130 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1131 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1132 			    newname));
1133 		thename = (char *)newname;
1134 	} else {
1135 		thename = origname;
1136 	}
1137 
1138 	if (props) {
1139 		uint64_t version;
1140 
1141 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1142 		    &version) == 0);
1143 
1144 		if ((props = zpool_validate_properties(hdl, origname,
1145 		    props, version, B_TRUE, errbuf)) == NULL) {
1146 			return (-1);
1147 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1148 			nvlist_free(props);
1149 			return (-1);
1150 		}
1151 	}
1152 
1153 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1154 
1155 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1156 	    &zc.zc_guid) == 0);
1157 
1158 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1159 		nvlist_free(props);
1160 		return (-1);
1161 	}
1162 
1163 	zc.zc_cookie = (uint64_t)importfaulted;
1164 	ret = 0;
1165 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1166 		char desc[1024];
1167 		if (newname == NULL)
1168 			(void) snprintf(desc, sizeof (desc),
1169 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1170 			    thename);
1171 		else
1172 			(void) snprintf(desc, sizeof (desc),
1173 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1174 			    origname, thename);
1175 
1176 		switch (errno) {
1177 		case ENOTSUP:
1178 			/*
1179 			 * Unsupported version.
1180 			 */
1181 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1182 			break;
1183 
1184 		case EINVAL:
1185 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1186 			break;
1187 
1188 		default:
1189 			(void) zpool_standard_error(hdl, errno, desc);
1190 		}
1191 
1192 		ret = -1;
1193 	} else {
1194 		zpool_handle_t *zhp;
1195 
1196 		/*
1197 		 * This should never fail, but play it safe anyway.
1198 		 */
1199 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1200 			ret = -1;
1201 		} else if (zhp != NULL) {
1202 			ret = zpool_create_zvol_links(zhp);
1203 			zpool_close(zhp);
1204 		}
1205 
1206 	}
1207 
1208 	zcmd_free_nvlists(&zc);
1209 	nvlist_free(props);
1210 
1211 	return (ret);
1212 }
1213 
1214 /*
1215  * Scrub the pool.
1216  */
1217 int
1218 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1219 {
1220 	zfs_cmd_t zc = { 0 };
1221 	char msg[1024];
1222 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1223 
1224 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1225 	zc.zc_cookie = type;
1226 
1227 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1228 		return (0);
1229 
1230 	(void) snprintf(msg, sizeof (msg),
1231 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1232 
1233 	if (errno == EBUSY)
1234 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1235 	else
1236 		return (zpool_standard_error(hdl, errno, msg));
1237 }
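
/*
 * Illustrative sketch (not part of the original source): starting a
 * full scrub, and later cancelling it with POOL_SCRUB_NONE.
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 *	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);
 */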
1238 
1239 /*
1240  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1241  * spare, but FALSE if it's an INUSE spare.
1242  */
1243 static nvlist_t *
1244 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1245     boolean_t *avail_spare, boolean_t *l2cache)
1246 {
1247 	uint_t c, children;
1248 	nvlist_t **child;
1249 	uint64_t theguid, present;
1250 	char *path;
1251 	uint64_t wholedisk = 0;
1252 	nvlist_t *ret;
1253 
1254 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1255 
1256 	if (search == NULL &&
1257 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1258 		/*
1259 		 * If the device has never been present since import, the only
1260 		 * reliable way to match the vdev is by GUID.
1261 		 */
1262 		if (theguid == guid)
1263 			return (nv);
1264 	} else if (search != NULL &&
1265 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1266 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1267 		    &wholedisk);
1268 		if (wholedisk) {
1269 			/*
1270 			 * For whole disks, the internal path has 's0', but the
1271 			 * path passed in by the user doesn't.
1272 			 */
1273 			if (strlen(search) == strlen(path) - 2 &&
1274 			    strncmp(search, path, strlen(search)) == 0)
1275 				return (nv);
1276 		} else if (strcmp(search, path) == 0) {
1277 			return (nv);
1278 		}
1279 	}
1280 
1281 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1282 	    &child, &children) != 0)
1283 		return (NULL);
1284 
1285 	for (c = 0; c < children; c++)
1286 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1287 		    avail_spare, l2cache)) != NULL)
1288 			return (ret);
1289 
1290 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1291 	    &child, &children) == 0) {
1292 		for (c = 0; c < children; c++) {
1293 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1294 			    avail_spare, l2cache)) != NULL) {
1295 				*avail_spare = B_TRUE;
1296 				return (ret);
1297 			}
1298 		}
1299 	}
1300 
1301 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
1302 	    &child, &children) == 0) {
1303 		for (c = 0; c < children; c++) {
1304 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1305 			    avail_spare, l2cache)) != NULL) {
1306 				*l2cache = B_TRUE;
1307 				return (ret);
1308 			}
1309 		}
1310 	}
1311 
1312 	return (NULL);
1313 }
1314 
1315 nvlist_t *
1316 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
1317     boolean_t *l2cache)
1318 {
1319 	char buf[MAXPATHLEN];
1320 	const char *search;
1321 	char *end;
1322 	nvlist_t *nvroot;
1323 	uint64_t guid;
1324 
1325 	guid = strtoull(path, &end, 10);
1326 	if (guid != 0 && *end == '\0') {
1327 		search = NULL;
1328 	} else if (path[0] != '/') {
1329 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1330 		search = buf;
1331 	} else {
1332 		search = path;
1333 	}
1334 
1335 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1336 	    &nvroot) == 0);
1337 
1338 	*avail_spare = B_FALSE;
1339 	*l2cache = B_FALSE;
1340 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare,
1341 	    l2cache));
1342 }
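
/*
 * Illustrative sketch (not part of the original source): the three
 * accepted forms of 'path' -- a bare device name, a full path, and a
 * vdev guid in decimal.  The names and guid are hypothetical.
 *
 *	boolean_t spare, l2cache;
 *
 *	(void) zpool_find_vdev(zhp, "c1t0d0", &spare, &l2cache);
 *	(void) zpool_find_vdev(zhp, "/dev/dsk/c1t0d0", &spare, &l2cache);
 *	(void) zpool_find_vdev(zhp, "9928313032907656379", &spare,
 *	    &l2cache);
 */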
1343 
1344 /*
1345  * Returns TRUE if the given guid corresponds to the given type.
1346  * This is used to check for hot spares (INUSE or not), and level 2 cache
1347  * devices.
1348  */
1349 static boolean_t
1350 is_guid_type(zpool_handle_t *zhp, uint64_t guid, const char *type)
1351 {
1352 	uint64_t target_guid;
1353 	nvlist_t *nvroot;
1354 	nvlist_t **list;
1355 	uint_t count;
1356 	int i;
1357 
1358 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1359 	    &nvroot) == 0);
1360 	if (nvlist_lookup_nvlist_array(nvroot, type, &list, &count) == 0) {
1361 		for (i = 0; i < count; i++) {
1362 			verify(nvlist_lookup_uint64(list[i], ZPOOL_CONFIG_GUID,
1363 			    &target_guid) == 0);
1364 			if (guid == target_guid)
1365 				return (B_TRUE);
1366 		}
1367 	}
1368 
1369 	return (B_FALSE);
1370 }
1371 
1372 /*
1373  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1374  * ZFS_ONLINE_* flags.
1375  */
1376 int
1377 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1378     vdev_state_t *newstate)
1379 {
1380 	zfs_cmd_t zc = { 0 };
1381 	char msg[1024];
1382 	nvlist_t *tgt;
1383 	boolean_t avail_spare, l2cache;
1384 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1385 
1386 	(void) snprintf(msg, sizeof (msg),
1387 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1388 
1389 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1390 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1391 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1392 
1393 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1394 
1395 	if (avail_spare ||
1396 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1397 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1398 
1399 	zc.zc_cookie = VDEV_STATE_ONLINE;
1400 	zc.zc_obj = flags;
1401 
1402 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1403 		return (zpool_standard_error(hdl, errno, msg));
1404 
1405 	*newstate = zc.zc_cookie;
1406 	return (0);
1407 }
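
/*
 * Illustrative sketch (not part of the original source): onlining a
 * device and reporting the state it actually reached.  The device name
 * is hypothetical.
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate != VDEV_STATE_HEALTHY)
 *		(void) printf("device came back %s\n",
 *		    zpool_state_to_name(newstate, VDEV_AUX_NONE));
 */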
1408 
1409 /*
1410  * Take the specified vdev offline.
1411  */
1412 int
1413 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1414 {
1415 	zfs_cmd_t zc = { 0 };
1416 	char msg[1024];
1417 	nvlist_t *tgt;
1418 	boolean_t avail_spare, l2cache;
1419 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1420 
1421 	(void) snprintf(msg, sizeof (msg),
1422 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1423 
1424 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1425 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == NULL)
1426 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1427 
1428 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1429 
1430 	if (avail_spare ||
1431 	    is_guid_type(zhp, zc.zc_guid, ZPOOL_CONFIG_SPARES) == B_TRUE)
1432 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1433 
1434 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1435 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1436 
1437 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1438 		return (0);
1439 
1440 	switch (errno) {
1441 	case EBUSY:
1442 
1443 		/*
1444 		 * There are no other replicas of this device.
1445 		 */
1446 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1447 
1448 	default:
1449 		return (zpool_standard_error(hdl, errno, msg));
1450 	}
1451 }
1452 
1453 /*
1454  * Mark the given vdev faulted.
1455  */
1456 int
1457 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1458 {
1459 	zfs_cmd_t zc = { 0 };
1460 	char msg[1024];
1461 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1462 
1463 	(void) snprintf(msg, sizeof (msg),
1464 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1465 
1466 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1467 	zc.zc_guid = guid;
1468 	zc.zc_cookie = VDEV_STATE_FAULTED;
1469 
1470 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1471 		return (0);
1472 
1473 	switch (errno) {
1474 	case EBUSY:
1475 
1476 		/*
1477 		 * There are no other replicas of this device.
1478 		 */
1479 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1480 
1481 	default:
1482 		return (zpool_standard_error(hdl, errno, msg));
1483 	}
1485 }
1486 
1487 /*
1488  * Mark the given vdev degraded.
1489  */
1490 int
1491 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1492 {
1493 	zfs_cmd_t zc = { 0 };
1494 	char msg[1024];
1495 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1496 
1497 	(void) snprintf(msg, sizeof (msg),
1498 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1499 
1500 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1501 	zc.zc_guid = guid;
1502 	zc.zc_cookie = VDEV_STATE_DEGRADED;
1503 
1504 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1505 		return (0);
1506 
1507 	return (zpool_standard_error(hdl, errno, msg));
1508 }
1509 
1510 /*
1511  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1512  * a hot spare.
1513  */
1514 static boolean_t
1515 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1516 {
1517 	nvlist_t **child;
1518 	uint_t c, children;
1519 	char *type;
1520 
1521 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1522 	    &children) == 0) {
1523 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1524 		    &type) == 0);
1525 
1526 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1527 		    children == 2 && child[which] == tgt)
1528 			return (B_TRUE);
1529 
1530 		for (c = 0; c < children; c++)
1531 			if (is_replacing_spare(child[c], tgt, which))
1532 				return (B_TRUE);
1533 	}
1534 
1535 	return (B_FALSE);
1536 }
1537 
1538 /*
1539  * Attach new_disk (fully described by nvroot) to old_disk.
1540  * If 'replacing' is specified, the new disk will replace the old one.
1541  */
1542 int
1543 zpool_vdev_attach(zpool_handle_t *zhp,
1544     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1545 {
1546 	zfs_cmd_t zc = { 0 };
1547 	char msg[1024];
1548 	int ret;
1549 	nvlist_t *tgt;
1550 	boolean_t avail_spare, l2cache;
1551 	uint64_t val, is_log;
1552 	char *path, *newname;
1553 	nvlist_t **child;
1554 	uint_t children;
1555 	nvlist_t *config_root;
1556 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1557 
1558 	if (replacing)
1559 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1560 		    "cannot replace %s with %s"), old_disk, new_disk);
1561 	else
1562 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1563 		    "cannot attach %s to %s"), new_disk, old_disk);
1564 
1565 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1566 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache)) == 0)
1567 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1568 
1569 	if (avail_spare)
1570 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1571 
1572 	if (l2cache)
1573 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1574 
1575 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1576 	zc.zc_cookie = replacing;
1577 
1578 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1579 	    &child, &children) != 0 || children != 1) {
1580 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1581 		    "new device must be a single disk"));
1582 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1583 	}
1584 
1585 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1586 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1587 
1588 	if ((newname = zpool_vdev_name(NULL, NULL, child[0])) == NULL)
1589 		return (-1);
1590 
1591 	/*
1592 	 * If the target is a hot spare that has been swapped in, we can only
1593 	 * replace it with another hot spare.
1594 	 */
1595 	if (replacing &&
1596 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1597 	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) == NULL ||
1598 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1599 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1600 		    "can only be replaced by another hot spare"));
1601 		free(newname);
1602 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1603 	}
1604 
1605 	/*
1606 	 * If we are attempting to replace a spare, it cannot be applied to an
1607 	 * already spared device.
1608 	 */
1609 	if (replacing &&
1610 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1611 	    zpool_find_vdev(zhp, newname, &avail_spare, &l2cache) != NULL &&
1612 	    avail_spare && is_replacing_spare(config_root, tgt, 0)) {
1613 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1614 		    "device has already been replaced with a spare"));
1615 		free(newname);
1616 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1617 	}
1618 
1619 	free(newname);
1620 
1621 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1622 		return (-1);
1623 
1624 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1625 
1626 	zcmd_free_nvlists(&zc);
1627 
1628 	if (ret == 0)
1629 		return (0);
1630 
1631 	switch (errno) {
1632 	case ENOTSUP:
1633 		/*
1634 		 * Can't attach to or replace this type of vdev.
1635 		 */
1636 		if (replacing) {
1637 			is_log = B_FALSE;
1638 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1639 			    &is_log);
1640 			if (is_log)
1641 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1642 				    "cannot replace a log with a spare"));
1643 			else
1644 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1645 				    "cannot replace a replacing device"));
1646 		} else {
1647 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1648 			    "can only attach to mirrors and top-level "
1649 			    "disks"));
1650 		}
1651 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1652 		break;
1653 
1654 	case EINVAL:
1655 		/*
1656 		 * The new device must be a single disk.
1657 		 */
1658 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1659 		    "new device must be a single disk"));
1660 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1661 		break;
1662 
1663 	case EBUSY:
1664 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1665 		    new_disk);
1666 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1667 		break;
1668 
1669 	case EOVERFLOW:
1670 		/*
1671 		 * The new device is too small.
1672 		 */
1673 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1674 		    "device is too small"));
1675 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1676 		break;
1677 
1678 	case EDOM:
1679 		/*
1680 		 * The new device has a different alignment requirement.
1681 		 */
1682 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1683 		    "devices have different sector alignment"));
1684 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1685 		break;
1686 
1687 	case ENAMETOOLONG:
1688 		/*
1689 		 * The resulting top-level vdev spec won't fit in the label.
1690 		 */
1691 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1692 		break;
1693 
1694 	default:
1695 		(void) zpool_standard_error(hdl, errno, msg);
1696 	}
1697 
1698 	return (-1);
1699 }
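
/*
 * Illustrative sketch (not part of the original source): with 'nvroot'
 * describing the single new disk (built as in the zpool_create()
 * sketch above), an existing device can be converted into a mirror:
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 0);
 *
 * Passing a non-zero 'replacing' instead replaces c1t0d0 with c2t0d0.
 */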
1700 
1701 /*
1702  * Detach the specified device.
1703  */
1704 int
1705 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1706 {
1707 	zfs_cmd_t zc = { 0 };
1708 	char msg[1024];
1709 	nvlist_t *tgt;
1710 	boolean_t avail_spare, l2cache;
1711 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1712 
1713 	(void) snprintf(msg, sizeof (msg),
1714 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1715 
1716 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1717 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1718 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1719 
1720 	if (avail_spare)
1721 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1722 
1723 	if (l2cache)
1724 		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
1725 
1726 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1727 
1728 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1729 		return (0);
1730 
1731 	switch (errno) {
1733 	case ENOTSUP:
1734 		/*
1735 		 * Can't detach from this type of vdev.
1736 		 */
1737 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1738 		    "applicable to mirror and replacing vdevs"));
1739 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1740 		break;
1741 
1742 	case EBUSY:
1743 		/*
1744 		 * There are no other replicas of this device.
1745 		 */
1746 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1747 		break;
1748 
1749 	default:
1750 		(void) zpool_standard_error(hdl, errno, msg);
1751 	}
1752 
1753 	return (-1);
1754 }
1755 
1756 /*
1757  * Remove the given device.  Currently, this is supported only for hot spares
1758  * and level 2 cache devices.
1759  */
1760 int
1761 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1762 {
1763 	zfs_cmd_t zc = { 0 };
1764 	char msg[1024];
1765 	nvlist_t *tgt;
1766 	boolean_t avail_spare, l2cache;
1767 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1768 
1769 	(void) snprintf(msg, sizeof (msg),
1770 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1771 
1772 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1773 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache)) == 0)
1774 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1775 
1776 	if (!avail_spare && !l2cache) {
1777 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1778 		    "only inactive hot spares or cache devices "
1779 		    "can be removed"));
1780 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1781 	}
1782 
1783 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1784 
1785 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1786 		return (0);
1787 
1788 	return (zpool_standard_error(hdl, errno, msg));
1789 }
1790 
1791 /*
1792  * Clear the errors for the pool, or the particular device if specified.
1793  */
1794 int
1795 zpool_clear(zpool_handle_t *zhp, const char *path)
1796 {
1797 	zfs_cmd_t zc = { 0 };
1798 	char msg[1024];
1799 	nvlist_t *tgt;
1800 	boolean_t avail_spare, l2cache;
1801 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1802 
1803 	if (path)
1804 		(void) snprintf(msg, sizeof (msg),
1805 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1806 		    path);
1807 	else
1808 		(void) snprintf(msg, sizeof (msg),
1809 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1810 		    zhp->zpool_name);
1811 
1812 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1813 	if (path) {
1814 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
1815 		    &l2cache)) == 0)
1816 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1817 
1818 		/*
1819 		 * Don't allow error clearing for hot spares.  Do allow
1820 		 * error clearing for l2cache devices.
1821 		 */
1822 		if (avail_spare)
1823 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1824 
1825 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1826 		    &zc.zc_guid) == 0);
1827 	}
1828 
1829 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1830 		return (0);
1831 
1832 	return (zpool_standard_error(hdl, errno, msg));
1833 }
1834 
1835 /*
1836  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1837  */
1838 int
1839 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1840 {
1841 	zfs_cmd_t zc = { 0 };
1842 	char msg[1024];
1843 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1844 
1845 	(void) snprintf(msg, sizeof (msg),
1846 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1847 	    guid);
1848 
1849 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1850 	zc.zc_guid = guid;
1851 
1852 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1853 		return (0);
1854 
1855 	return (zpool_standard_error(hdl, errno, msg));
1856 }
1857 
1858 /*
1859  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1860  * hierarchy.
1861  */
1862 int
1863 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1864     void *data)
1865 {
1866 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1867 	char (*paths)[MAXPATHLEN];
1868 	size_t size = 4;
1869 	int curr, fd, base, ret = 0;
1870 	DIR *dirp;
1871 	struct dirent *dp;
1872 	struct stat st;
1873 
1874 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1875 		return (errno == ENOENT ? 0 : -1);
1876 
1877 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1878 		int err = errno;
1879 		(void) close(base);
1880 		return (err == ENOENT ? 0 : -1);
1881 	}
1882 
1883 	/*
1884 	 * Oddly this wasn't a directory -- ignore that failure since we
1885 	 * know there are no links lower in the (non-existent) hierarchy.
1886 	 */
1887 	if (!S_ISDIR(st.st_mode)) {
1888 		(void) close(base);
1889 		return (0);
1890 	}
1891 
1892 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1893 		(void) close(base);
1894 		return (-1);
1895 	}
1896 
1897 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1898 	curr = 0;
1899 
1900 	while (curr >= 0) {
1901 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1902 			goto err;
1903 
1904 		if (S_ISDIR(st.st_mode)) {
1905 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1906 				goto err;
1907 
1908 			if ((dirp = fdopendir(fd)) == NULL) {
1909 				(void) close(fd);
1910 				goto err;
1911 			}
1912 
1913 			while ((dp = readdir(dirp)) != NULL) {
1914 				if (dp->d_name[0] == '.')
1915 					continue;
1916 
1917 				if (curr + 1 == size) {
1918 					paths = zfs_realloc(hdl, paths,
1919 					    size * sizeof (paths[0]),
1920 					    size * 2 * sizeof (paths[0]));
1921 					if (paths == NULL) {
1922 						(void) closedir(dirp);
1923 						(void) close(fd);
1924 						goto err;
1925 					}
1926 
1927 					size *= 2;
1928 				}
1929 
1930 				(void) strlcpy(paths[curr + 1], paths[curr],
1931 				    sizeof (paths[curr + 1]));
1932 				(void) strlcat(paths[curr], "/",
1933 				    sizeof (paths[curr]));
1934 				(void) strlcat(paths[curr], dp->d_name,
1935 				    sizeof (paths[curr]));
1936 				curr++;
1937 			}
1938 
1939 			(void) closedir(dirp);
1940 
1941 		} else {
1942 			if ((ret = cb(paths[curr], data)) != 0)
1943 				break;
1944 		}
1945 
1946 		curr--;
1947 	}
1948 
1949 	free(paths);
1950 	(void) close(base);
1951 
1952 	return (ret);
1953 
1954 err:
1955 	free(paths);
1956 	(void) close(base);
1957 	return (-1);
1958 }
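
/*
 * Illustrative sketch (not part of the original source): a callback
 * that simply prints each zvol link found under the pool.
 *
 *	static int
 *	print_zvol(const char *path, void *data)
 *	{
 *		(void) printf("/dev/zvol/dsk/%s\n", path);
 *		return (0);
 *	}
 *
 *	(void) zpool_iter_zvol(zhp, print_zvol, NULL);
 */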
1959 
1960 typedef struct zvol_cb {
1961 	zpool_handle_t *zcb_pool;
1962 	boolean_t zcb_create;
1963 } zvol_cb_t;
1964 
1965 /*ARGSUSED*/
1966 static int
1967 do_zvol_create(zfs_handle_t *zhp, void *data)
1968 {
1969 	int ret = 0;
1970 
1971 	if (ZFS_IS_VOLUME(zhp)) {
1972 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1973 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
1974 	}
1975 
1976 	if (ret == 0)
1977 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1978 
1979 	zfs_close(zhp);
1980 
1981 	return (ret);
1982 }
1983 
1984 /*
1985  * Iterate over all zvols in the pool and make any necessary minor nodes.
1986  */
1987 int
1988 zpool_create_zvol_links(zpool_handle_t *zhp)
1989 {
1990 	zfs_handle_t *zfp;
1991 	int ret;
1992 
1993 	/*
1994 	 * If the pool is unavailable, just return success.
1995 	 */
1996 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1997 	    zhp->zpool_name)) == NULL)
1998 		return (0);
1999 
2000 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
2001 
2002 	zfs_close(zfp);
2003 	return (ret);
2004 }
2005 
2006 static int
2007 do_zvol_remove(const char *dataset, void *data)
2008 {
2009 	zpool_handle_t *zhp = data;
2010 
2011 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
2012 }
2013 
2014 /*
2015  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
2016  * by examining the /dev links so that a corrupted pool doesn't impede this
2017  * operation.
2018  */
2019 int
2020 zpool_remove_zvol_links(zpool_handle_t *zhp)
2021 {
2022 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
2023 }
2024 
2025 /*
2026  * Convert from a devid string to a path.
2027  */
2028 static char *
2029 devid_to_path(char *devid_str)
2030 {
2031 	ddi_devid_t devid;
2032 	char *minor;
2033 	char *path;
2034 	devid_nmlist_t *list = NULL;
2035 	int ret;
2036 
2037 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
2038 		return (NULL);
2039 
2040 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
2041 
2042 	devid_str_free(minor);
2043 	devid_free(devid);
2044 
2045 	if (ret != 0)
2046 		return (NULL);
2047 
	/* free the nmlist even if the strdup() fails, so it isn't leaked */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);
2052 
2053 	return (path);
2054 }
2055 
2056 /*
2057  * Convert from a path to a devid string.
2058  */
2059 static char *
2060 path_to_devid(const char *path)
2061 {
2062 	int fd;
2063 	ddi_devid_t devid;
2064 	char *minor, *ret;
2065 
2066 	if ((fd = open(path, O_RDONLY)) < 0)
2067 		return (NULL);
2068 
2069 	minor = NULL;
2070 	ret = NULL;
2071 	if (devid_get(fd, &devid) == 0) {
2072 		if (devid_get_minor_name(fd, &minor) == 0)
2073 			ret = devid_str_encode(devid, minor);
2074 		if (minor != NULL)
2075 			devid_str_free(minor);
2076 		devid_free(devid);
2077 	}
2078 	(void) close(fd);
2079 
2080 	return (ret);
2081 }
2082 
2083 /*
2084  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
2085  * ignore any failure here, since a common case is for an unprivileged user to
2086  * type 'zpool status', and we'll display the correct information anyway.
2087  */
2088 static void
2089 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
2090 {
2091 	zfs_cmd_t zc = { 0 };
2092 
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
2095 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2096 	    &zc.zc_guid) == 0);
2097 
2098 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
2099 }
2100 
2101 /*
2102  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
2103  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
2104  * We also check if this is a whole disk, in which case we strip off the
2105  * trailing 's0' slice name.
2106  *
2107  * This routine is also responsible for identifying when disks have been
2108  * reconfigured in a new location.  The kernel will have opened the device by
2109  * devid, but the path will still refer to the old location.  To catch this, we
2110  * first do a path -> devid translation (which is fast for the common case).  If
2111  * the devid matches, we're done.  If not, we do a reverse devid -> path
2112  * translation and issue the appropriate ioctl() to update the path of the vdev.
2113  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
2114  * of these checks.
2115  */
2116 char *
2117 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2118 {
2119 	char *path, *devid;
2120 	uint64_t value;
2121 	char buf[64];
2122 	vdev_stat_t *vs;
2123 	uint_t vsc;
2124 
2125 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2126 	    &value) == 0) {
2127 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2128 		    &value) == 0);
2129 		(void) snprintf(buf, sizeof (buf), "%llu",
2130 		    (u_longlong_t)value);
2131 		path = buf;
2132 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2133 
2134 		/*
2135 		 * If the device is dead (faulted, offline, etc) then don't
2136 		 * bother opening it.  Otherwise we may be forcing the user to
2137 		 * open a misbehaving device, which can have undesirable
2138 		 * effects.
2139 		 */
2140 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2141 		    (uint64_t **)&vs, &vsc) != 0 ||
2142 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2143 		    zhp != NULL &&
2144 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2145 			/*
2146 			 * Determine if the current path is correct.
2147 			 */
2148 			char *newdevid = path_to_devid(path);
2149 
2150 			if (newdevid == NULL ||
2151 			    strcmp(devid, newdevid) != 0) {
2152 				char *newpath;
2153 
2154 				if ((newpath = devid_to_path(devid)) != NULL) {
2155 					/*
2156 					 * Update the path appropriately.
2157 					 */
2158 					set_path(zhp, nv, newpath);
2159 					if (nvlist_add_string(nv,
2160 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2161 						verify(nvlist_lookup_string(nv,
2162 						    ZPOOL_CONFIG_PATH,
2163 						    &path) == 0);
2164 					free(newpath);
2165 				}
2166 			}
2167 
2168 			if (newdevid)
2169 				devid_str_free(newdevid);
2170 		}
2171 
2172 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2173 			path += 9;
2174 
2175 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2176 		    &value) == 0 && value) {
2177 			char *tmp = zfs_strdup(hdl, path);
2178 			if (tmp == NULL)
2179 				return (NULL);
2180 			tmp[strlen(path) - 2] = '\0';
2181 			return (tmp);
2182 		}
2183 	} else {
2184 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2185 
2186 		/*
2187 		 * If it's a raidz device, we need to stick in the parity level.
2188 		 */
2189 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2190 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2191 			    &value) == 0);
2192 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2193 			    (u_longlong_t)value);
2194 			path = buf;
2195 		}
2196 	}
2197 
2198 	return (zfs_strdup(hdl, path));
2199 }
2200 
2201 static int
2202 zbookmark_compare(const void *a, const void *b)
2203 {
2204 	return (memcmp(a, b, sizeof (zbookmark_t)));
2205 }
2206 
2207 /*
2208  * Retrieve the persistent error log, uniquify the members, and return to the
2209  * caller.
2210  */
2211 int
2212 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2213 {
2214 	zfs_cmd_t zc = { 0 };
2215 	uint64_t count;
2216 	zbookmark_t *zb = NULL;
2217 	int i;
2218 
2219 	/*
2220 	 * Retrieve the raw error list from the kernel.  If the number of errors
2221 	 * has increased, allocate more space and continue until we get the
2222 	 * entire list.
2223 	 */
2224 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2225 	    &count) == 0);
2226 	if (count == 0)
2227 		return (0);
2228 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2229 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2230 		return (-1);
2231 	zc.zc_nvlist_dst_size = count;
2232 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2233 	for (;;) {
2234 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2235 		    &zc) != 0) {
2236 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2237 			if (errno == ENOMEM) {
2238 				count = zc.zc_nvlist_dst_size;
2239 				if ((zc.zc_nvlist_dst = (uintptr_t)
2240 				    zfs_alloc(zhp->zpool_hdl, count *
2241 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2242 					return (-1);
2243 			} else {
2244 				return (-1);
2245 			}
2246 		} else {
2247 			break;
2248 		}
2249 	}
2250 
2251 	/*
2252 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2253 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2254 	 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
2255 	 * _not_ copied as part of the process.  So we point the start of our
2256 	 * array appropriate and decrement the total number of elements.
2257 	 */
2258 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2259 	    zc.zc_nvlist_dst_size;
2260 	count -= zc.zc_nvlist_dst_size;
2261 
2262 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2263 
2264 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2265 
2266 	/*
2267 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2268 	 */
2269 	for (i = 0; i < count; i++) {
2270 		nvlist_t *nv;
2271 
2272 		/* ignoring zb_blkid and zb_level for now */
2273 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2274 		    zb[i-1].zb_object == zb[i].zb_object)
2275 			continue;
2276 
2277 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2278 			goto nomem;
2279 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2280 		    zb[i].zb_objset) != 0) {
2281 			nvlist_free(nv);
2282 			goto nomem;
2283 		}
2284 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2285 		    zb[i].zb_object) != 0) {
2286 			nvlist_free(nv);
2287 			goto nomem;
2288 		}
2289 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2290 			nvlist_free(nv);
2291 			goto nomem;
2292 		}
2293 		nvlist_free(nv);
2294 	}
2295 
2296 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2297 	return (0);
2298 
2299 nomem:
2300 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2301 	return (no_memory(zhp->zpool_hdl));
2302 }
2303 
2304 /*
2305  * Upgrade a ZFS pool to the latest on-disk version.
2306  */
2307 int
2308 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2309 {
2310 	zfs_cmd_t zc = { 0 };
2311 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2312 
2313 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2314 	zc.zc_cookie = new_version;
2315 
2316 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2317 		return (zpool_standard_error_fmt(hdl, errno,
2318 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2319 		    zhp->zpool_name));
2320 	return (0);
2321 }
2322 
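/*
 * Flatten a subcommand and its arguments into a single history record
 * string.  For example (illustrative): subcommand "create" with argv of
 * {"create", "-f", "tank", "c0d0"} yields "create -f tank c0d0".  Any
 * argument that would overflow HIS_MAX_RECORD_LEN, and everything after
 * it, is silently dropped.
 */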
2323 void
2324 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2325     char *history_str)
2326 {
2327 	int i;
2328 
2329 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2330 	for (i = 1; i < argc; i++) {
2331 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2332 		    HIS_MAX_RECORD_LEN)
2333 			break;
2334 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2335 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2336 	}
2337 }
2338 
2339 /*
2340  * Stage command history for logging.
2341  */
2342 int
2343 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2344 {
2345 	if (history_str == NULL)
2346 		return (EINVAL);
2347 
2348 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2349 		return (EINVAL);
2350 
2351 	if (hdl->libzfs_log_str != NULL)
2352 		free(hdl->libzfs_log_str);
2353 
2354 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2355 		return (no_memory(hdl));
2356 
2357 	return (0);
2358 }
2359 
2360 /*
2361  * Perform ioctl to get some command history of a pool.
2362  *
2363  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2364  * logical offset of the history buffer to start reading from.
2365  *
2366  * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual number of bytes read into 'buf'.
2368  */
2369 static int
2370 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2371 {
2372 	zfs_cmd_t zc = { 0 };
2373 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2374 
2375 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2376 
2377 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2378 	zc.zc_history_len = *len;
2379 	zc.zc_history_offset = *off;
2380 
2381 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2382 		switch (errno) {
2383 		case EPERM:
2384 			return (zfs_error_fmt(hdl, EZFS_PERM,
2385 			    dgettext(TEXT_DOMAIN,
2386 			    "cannot show history for pool '%s'"),
2387 			    zhp->zpool_name));
2388 		case ENOENT:
2389 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2390 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2391 			    "'%s'"), zhp->zpool_name));
2392 		case ENOTSUP:
2393 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2394 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2395 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2396 		default:
2397 			return (zpool_standard_error_fmt(hdl, errno,
2398 			    dgettext(TEXT_DOMAIN,
2399 			    "cannot get history for '%s'"), zhp->zpool_name));
2400 		}
2401 	}
2402 
2403 	*len = zc.zc_history_len;
2404 	*off = zc.zc_history_offset;
2405 
2406 	return (0);
2407 }
2408 
2409 /*
2410  * Process the buffer of nvlists, unpacking and storing each nvlist record
 * into 'records'.  'leftover' is set to the number of trailing bytes that
 * were not processed because they did not form a complete record.
2413  */
2414 static int
2415 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2416     nvlist_t ***records, uint_t *numrecords)
2417 {
2418 	uint64_t reclen;
2419 	nvlist_t *nv;
2420 	int i;
2421 
2422 	while (bytes_read > sizeof (reclen)) {
2423 
2424 		/* get length of packed record (stored as little endian) */
2425 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2426 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2427 
2428 		if (bytes_read < sizeof (reclen) + reclen)
2429 			break;
2430 
2431 		/* unpack record */
2432 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2433 			return (ENOMEM);
2434 		bytes_read -= sizeof (reclen) + reclen;
2435 		buf += sizeof (reclen) + reclen;
2436 
2437 		/* add record to nvlist array */
2438 		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp;

			/*
			 * Check the realloc() result so that a failure
			 * doesn't leak the existing array or the freshly
			 * unpacked record.
			 */
			if ((tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *))) == NULL) {
				(*numrecords)--;
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
		}
2443 		(*records)[*numrecords - 1] = nv;
2444 	}
2445 
2446 	*leftover = bytes_read;
2447 	return (0);
2448 }
2449 
2450 #define	HIS_BUF_LEN	(128*1024)
2451 
2452 /*
2453  * Retrieve the command history of a pool.
2454  */
2455 int
2456 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2457 {
2458 	char buf[HIS_BUF_LEN];
2459 	uint64_t off = 0;
2460 	nvlist_t **records = NULL;
2461 	uint_t numrecords = 0;
2462 	int err, i;
2463 
2464 	do {
2465 		uint64_t bytes_read = sizeof (buf);
2466 		uint64_t leftover;
2467 
2468 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2469 			break;
2470 
		/* if nothing else was read in, we're at EOF; just return */
2472 		if (!bytes_read)
2473 			break;
2474 
2475 		if ((err = zpool_history_unpack(buf, bytes_read,
2476 		    &leftover, &records, &numrecords)) != 0)
2477 			break;
2478 		off -= leftover;
2479 
2480 		/* CONSTCOND */
2481 	} while (1);
2482 
2483 	if (!err) {
2484 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2485 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2486 		    records, numrecords) == 0);
2487 	}
2488 	for (i = 0; i < numrecords; i++)
2489 		nvlist_free(records[i]);
2490 	free(records);
2491 
2492 	return (err);
2493 }
2494 
2495 void
2496 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2497     char *pathname, size_t len)
2498 {
2499 	zfs_cmd_t zc = { 0 };
2500 	boolean_t mounted = B_FALSE;
2501 	char *mntpnt = NULL;
2502 	char dsname[MAXNAMELEN];
2503 
2504 	if (dsobj == 0) {
2505 		/* special case for the MOS */
2506 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
2507 		return;
2508 	}
2509 
2510 	/* get the dataset's name */
2511 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2512 	zc.zc_obj = dsobj;
2513 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2514 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2515 		/* just write out a path of two object numbers */
2516 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2517 		    dsobj, obj);
2518 		return;
2519 	}
2520 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2521 
2522 	/* find out if the dataset is mounted */
2523 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2524 
2525 	/* get the corrupted object's path */
2526 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2527 	zc.zc_obj = obj;
2528 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2529 	    &zc) == 0) {
2530 		if (mounted) {
2531 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2532 			    zc.zc_value);
2533 		} else {
2534 			(void) snprintf(pathname, len, "%s:%s",
2535 			    dsname, zc.zc_value);
2536 		}
2537 	} else {
2538 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2539 	}
2540 	free(mntpnt);
2541 }
2542 
2543 #define	RDISK_ROOT	"/dev/rdsk"
2544 #define	BACKUP_SLICE	"s2"
2545 /*
 * Don't start the slice at the default block of 34; many storage
 * devices use a stripe width of 128K, so start at block 256 instead
 * (256 blocks * 512 bytes = 128K).
2548  */
2549 #define	NEW_START_BLOCK	256
2550 
2551 /*
 * Read the EFI label from the config; if a label does not exist, pass
 * the error back to the caller.  If the caller has passed a non-NULL
2554  * diskaddr argument then we set it to the starting address of the EFI
2555  * partition.
2556  */
2557 static int
2558 read_efi_label(nvlist_t *config, diskaddr_t *sb)
2559 {
2560 	char *path;
2561 	int fd;
2562 	char diskname[MAXPATHLEN];
2563 	int err = -1;
2564 
2565 	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
2566 		return (err);
2567 
2568 	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
2569 	    strrchr(path, '/'));
2570 	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2571 		struct dk_gpt *vtoc;
2572 
2573 		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
2574 			if (sb != NULL)
2575 				*sb = vtoc->efi_parts[0].p_start;
2576 			efi_free(vtoc);
2577 		}
2578 		(void) close(fd);
2579 	}
2580 	return (err);
2581 }
2582 
2583 /*
 * Determine where a partition starts on a disk in the current
 * configuration.
2586  */
2587 static diskaddr_t
2588 find_start_block(nvlist_t *config)
2589 {
2590 	nvlist_t **child;
2591 	uint_t c, children;
2592 	diskaddr_t sb = MAXOFFSET_T;
2593 	uint64_t wholedisk;
2594 
2595 	if (nvlist_lookup_nvlist_array(config,
2596 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2597 		if (nvlist_lookup_uint64(config,
2598 		    ZPOOL_CONFIG_WHOLE_DISK,
2599 		    &wholedisk) != 0 || !wholedisk) {
2600 			return (MAXOFFSET_T);
2601 		}
2602 		if (read_efi_label(config, &sb) < 0)
2603 			sb = MAXOFFSET_T;
2604 		return (sb);
2605 	}
2606 
2607 	for (c = 0; c < children; c++) {
2608 		sb = find_start_block(child[c]);
2609 		if (sb != MAXOFFSET_T) {
2610 			return (sb);
2611 		}
2612 	}
2613 	return (MAXOFFSET_T);
2614 }
2615 
2616 /*
2617  * Label an individual disk.  The name provided is the short name,
2618  * stripped of any leading /dev path.
2619  */
2620 int
2621 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2622 {
2623 	char path[MAXPATHLEN];
2624 	struct dk_gpt *vtoc;
2625 	int fd;
2626 	size_t resv = EFI_MIN_RESV_SIZE;
2627 	uint64_t slice_size;
2628 	diskaddr_t start_block;
2629 	char errbuf[1024];
2630 
2631 	/* prepare an error message just in case */
2632 	(void) snprintf(errbuf, sizeof (errbuf),
2633 	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
2634 
2635 	if (zhp) {
2636 		nvlist_t *nvroot;
2637 
2638 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2639 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2640 
2641 		if (zhp->zpool_start_block == 0)
2642 			start_block = find_start_block(nvroot);
2643 		else
2644 			start_block = zhp->zpool_start_block;
2645 		zhp->zpool_start_block = start_block;
2646 	} else {
2647 		/* new pool */
2648 		start_block = NEW_START_BLOCK;
2649 	}
2650 
2651 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2652 	    BACKUP_SLICE);
2653 
2654 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2655 		/*
2656 		 * This shouldn't happen.  We've long since verified that this
2657 		 * is a valid device.
2658 		 */
2659 		zfs_error_aux(hdl,
2660 		    dgettext(TEXT_DOMAIN, "unable to open device"));
2661 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2662 	}
2663 
2664 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2665 		/*
2666 		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity.
2668 		 */
2669 		if (errno == ENOMEM)
2670 			(void) no_memory(hdl);
2671 
2672 		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));
2675 
2676 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2677 	}
2678 
2679 	slice_size = vtoc->efi_last_u_lba + 1;
2680 	slice_size -= EFI_MIN_RESV_SIZE;
2681 	if (start_block == MAXOFFSET_T)
2682 		start_block = NEW_START_BLOCK;
2683 	slice_size -= start_block;
2684 
2685 	vtoc->efi_parts[0].p_start = start_block;
2686 	vtoc->efi_parts[0].p_size = slice_size;
2687 
2688 	/*
2689 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2690 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2691 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2692 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2693 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2694 	 * can get, in the absence of V_OTHER.
2695 	 */
2696 	vtoc->efi_parts[0].p_tag = V_USR;
2697 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2698 
2699 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2700 	vtoc->efi_parts[8].p_size = resv;
2701 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2702 
2703 	if (efi_write(fd, vtoc) != 0) {
2704 		/*
2705 		 * Some block drivers (like pcata) may not support EFI
2706 		 * GPT labels.  Print out a helpful error message dir-
2707 		 * ecting the user to manually label the disk and give
2708 		 * a specific slice.
2709 		 */
2710 		(void) close(fd);
2711 		efi_free(vtoc);
2712 
2713 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2714 		    "try using fdisk(1M) and then provide a specific slice"));
2715 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2716 	}
2717 
2718 	(void) close(fd);
2719 	efi_free(vtoc);
2720 	return (0);
2721 }
2722 
2723 static boolean_t
2724 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
2725 {
2726 	char *type;
2727 	nvlist_t **child;
2728 	uint_t children, c;
2729 
2730 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
2731 	if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
2732 	    strcmp(type, VDEV_TYPE_FILE) == 0 ||
2733 	    strcmp(type, VDEV_TYPE_LOG) == 0 ||
2734 	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
2735 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2736 		    "vdev type '%s' is not supported"), type);
2737 		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
2738 		return (B_FALSE);
2739 	}
2740 	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
2741 	    &child, &children) == 0) {
2742 		for (c = 0; c < children; c++) {
2743 			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
2744 				return (B_FALSE);
2745 		}
2746 	}
2747 	return (B_TRUE);
2748 }
2749 
2750 /*
 * Check whether this zvol is allowable for use as a dump device; return
 * zero if it is, > 0 if it is not, and < 0 if it is not a zvol.
2753  */
2754 int
2755 zvol_check_dump_config(char *arg)
2756 {
2757 	zpool_handle_t *zhp = NULL;
2758 	nvlist_t *config, *nvroot;
2759 	char *p, *volname;
2760 	nvlist_t **top;
2761 	uint_t toplevels;
2762 	libzfs_handle_t *hdl;
2763 	char errbuf[1024];
2764 	char poolname[ZPOOL_MAXNAMELEN];
2765 	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
2766 	int ret = 1;
2767 
2768 	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
2769 		return (-1);
2770 	}
2771 
2772 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
2773 	    "dump is not supported on device '%s'"), arg);
2774 
2775 	if ((hdl = libzfs_init()) == NULL)
2776 		return (1);
2777 	libzfs_print_on_error(hdl, B_TRUE);
2778 
2779 	volname = arg + pathlen;
2780 
2781 	/* check the configuration of the pool */
2782 	if ((p = strchr(volname, '/')) == NULL) {
2783 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2784 		    "malformed dataset name"));
		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
		goto out;	/* avoid leaking the libzfs handle */
2787 	} else if (p - volname >= ZFS_MAXNAMELEN) {
2788 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2789 		    "dataset name is too long"));
		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
		goto out;
2792 	} else {
2793 		(void) strncpy(poolname, volname, p - volname);
2794 		poolname[p - volname] = '\0';
2795 	}
2796 
2797 	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
2798 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2799 		    "could not open pool '%s'"), poolname);
2800 		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
2801 		goto out;
2802 	}
2803 	config = zpool_get_config(zhp, NULL);
2804 	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2805 	    &nvroot) != 0) {
2806 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2807 		    "could not obtain vdev configuration for  '%s'"), poolname);
2808 		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
2809 		goto out;
2810 	}
2811 
2812 	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2813 	    &top, &toplevels) == 0);
2814 	if (toplevels != 1) {
2815 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2816 		    "'%s' has multiple top level vdevs"), poolname);
2817 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
2818 		goto out;
2819 	}
2820 
2821 	if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
2822 		goto out;
2823 	}
2824 	ret = 0;
2825 
2826 out:
2827 	if (zhp)
2828 		zpool_close(zhp);
2829 	libzfs_fini(hdl);
2830 	return (ret);
2831 }
2832