xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision 351420b34707afeafa8d5c3e0c77b7bcffb1edc0)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 
52 /*
53  * ====================================================================
54  *   zpool property functions
55  * ====================================================================
56  */
57 
58 static int
59 zpool_get_all_props(zpool_handle_t *zhp)
60 {
61 	zfs_cmd_t zc = { 0 };
62 	libzfs_handle_t *hdl = zhp->zpool_hdl;
63 
64 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
65 
66 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
67 		return (-1);
68 
69 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
70 		if (errno == ENOMEM) {
71 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
72 				zcmd_free_nvlists(&zc);
73 				return (-1);
74 			}
75 		} else {
76 			zcmd_free_nvlists(&zc);
77 			return (-1);
78 		}
79 	}
80 
81 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
82 		zcmd_free_nvlists(&zc);
83 		return (-1);
84 	}
85 
86 	zcmd_free_nvlists(&zc);
87 
88 	return (0);
89 }
90 
91 static int
92 zpool_props_refresh(zpool_handle_t *zhp)
93 {
94 	nvlist_t *old_props;
95 
96 	old_props = zhp->zpool_props;
97 
98 	if (zpool_get_all_props(zhp) != 0)
99 		return (-1);
100 
101 	nvlist_free(old_props);
102 	return (0);
103 }
104 
105 static char *
106 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
107     zprop_source_t *src)
108 {
109 	nvlist_t *nv, *nvl;
110 	uint64_t ival;
111 	char *value;
112 	zprop_source_t source;
113 
114 	nvl = zhp->zpool_props;
115 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
116 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
117 		source = ival;
118 		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
119 	} else {
120 		source = ZPROP_SRC_DEFAULT;
121 		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
122 			value = "-";
123 	}
124 
125 	if (src)
126 		*src = source;
127 
128 	return (value);
129 }
130 
131 uint64_t
132 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
133 {
134 	nvlist_t *nv, *nvl;
135 	uint64_t value;
136 	zprop_source_t source;
137 
138 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
139 		return (zpool_prop_default_numeric(prop));
140 
141 	nvl = zhp->zpool_props;
142 	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
143 		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
144 		source = value;
145 		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
146 	} else {
147 		source = ZPROP_SRC_DEFAULT;
148 		value = zpool_prop_default_numeric(prop);
149 	}
150 
151 	if (src)
152 		*src = source;
153 
154 	return (value);
155 }
156 
157 /*
158  * Map VDEV STATE to printed strings.
159  */
160 char *
161 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
162 {
163 	switch (state) {
164 	case VDEV_STATE_CLOSED:
165 	case VDEV_STATE_OFFLINE:
166 		return (gettext("OFFLINE"));
167 	case VDEV_STATE_REMOVED:
168 		return (gettext("REMOVED"));
169 	case VDEV_STATE_CANT_OPEN:
170 		if (aux == VDEV_AUX_CORRUPT_DATA)
171 			return (gettext("FAULTED"));
172 		else
173 			return (gettext("UNAVAIL"));
174 	case VDEV_STATE_FAULTED:
175 		return (gettext("FAULTED"));
176 	case VDEV_STATE_DEGRADED:
177 		return (gettext("DEGRADED"));
178 	case VDEV_STATE_HEALTHY:
179 		return (gettext("ONLINE"));
180 	}
181 
182 	return (gettext("UNKNOWN"));
183 }
184 
185 /*
186  * Get a zpool property value for 'prop' and return the value in
187  * a pre-allocated buffer.
188  */
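/*
 * Illustrative usage sketch (not part of the library); assumes an open
 * pool handle 'zhp' and elides error handling:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL) == 0)
 *		(void) printf("health: %s\n", buf);
 */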
189 int
190 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
191     zprop_source_t *srctype)
192 {
193 	uint64_t intval;
194 	const char *strval;
195 	zprop_source_t src = ZPROP_SRC_NONE;
196 	nvlist_t *nvroot;
197 	vdev_stat_t *vs;
198 	uint_t vsc;
199 
200 	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
201 		if (prop == ZPOOL_PROP_NAME)
202 			(void) strlcpy(buf, zpool_get_name(zhp), len);
203 		else if (prop == ZPOOL_PROP_HEALTH)
204 			(void) strlcpy(buf, "FAULTED", len);
205 		else
206 			(void) strlcpy(buf, "-", len);
207 		return (0);
208 	}
209 
210 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
211 	    prop != ZPOOL_PROP_NAME)
212 		return (-1);
213 
214 	switch (zpool_prop_get_type(prop)) {
215 	case PROP_TYPE_STRING:
216 		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
217 		    len);
218 		break;
219 
220 	case PROP_TYPE_NUMBER:
221 		intval = zpool_get_prop_int(zhp, prop, &src);
222 
223 		switch (prop) {
224 		case ZPOOL_PROP_SIZE:
225 		case ZPOOL_PROP_USED:
226 		case ZPOOL_PROP_AVAILABLE:
227 			(void) zfs_nicenum(intval, buf, len);
228 			break;
229 
230 		case ZPOOL_PROP_CAPACITY:
231 			(void) snprintf(buf, len, "%llu%%",
232 			    (u_longlong_t)intval);
233 			break;
234 
235 		case ZPOOL_PROP_HEALTH:
236 			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
237 			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
238 			verify(nvlist_lookup_uint64_array(nvroot,
239 			    ZPOOL_CONFIG_STATS, (uint64_t **)&vs, &vsc) == 0);
240 
241 			(void) strlcpy(buf, zpool_state_to_name(intval,
242 			    vs->vs_aux), len);
243 			break;
244 		default:
245 			(void) snprintf(buf, len, "%llu", intval);
246 		}
247 		break;
248 
249 	case PROP_TYPE_INDEX:
250 		intval = zpool_get_prop_int(zhp, prop, &src);
251 		if (zpool_prop_index_to_string(prop, intval, &strval)
252 		    != 0)
253 			return (-1);
254 		(void) strlcpy(buf, strval, len);
255 		break;
256 
257 	default:
258 		abort();
259 	}
260 
261 	if (srctype)
262 		*srctype = src;
263 
264 	return (0);
265 }
266 
267 /*
268  * Check that the bootfs name carries the same pool name as the pool it
269  * is being set on.  Assumes bootfs is a valid dataset name.
270  */
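/*
 * For example, with a pool named "tank" (names illustrative only),
 * "tank" and "tank/root" are acceptable bootfs values here, while
 * "tank2/root" or "other/root" are not.
 */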
271 static boolean_t
272 bootfs_name_valid(const char *pool, char *bootfs)
273 {
274 	int len = strlen(pool);
275 
276 	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM))
277 		return (B_FALSE);
278 
279 	if (strncmp(pool, bootfs, len) == 0 &&
280 	    (bootfs[len] == '/' || bootfs[len] == '\0'))
281 		return (B_TRUE);
282 
283 	return (B_FALSE);
284 }
285 
286 /*
287  * Given an nvlist of zpool properties to be set, validate that they are
288  * correct, and parse any numeric properties (index, boolean, etc.) if they are
289  * specified as strings.
290  */
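/*
 * Minimal sketch of an in-file caller (assumptions: 'hdl' is an open
 * libzfs handle, 'errbuf' is a preformatted error prefix, and cleanup
 * is elided):
 *
 *	nvlist_t *props, *realprops;
 *
 *	(void) nvlist_alloc(&props, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(props, "altroot", "/mnt");
 *	realprops = zpool_validate_properties(hdl, "tank", props,
 *	    SPA_VERSION, B_TRUE, errbuf);
 */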
291 static nvlist_t *
292 zpool_validate_properties(libzfs_handle_t *hdl, const char *poolname,
293     nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf)
294 {
295 	nvpair_t *elem;
296 	nvlist_t *retprops;
297 	zpool_prop_t prop;
298 	char *strval;
299 	uint64_t intval;
300 	int temp = -1;
301 	boolean_t has_altroot = B_FALSE;
302 
303 	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
304 		(void) no_memory(hdl);
305 		return (NULL);
306 	}
307 
308 	elem = NULL;
309 	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
310 		const char *propname = nvpair_name(elem);
311 
312 		/*
313 		 * Make sure this property is valid and applies to this type.
314 		 */
315 		if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) {
316 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
317 			    "invalid property '%s'"), propname);
318 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
319 			goto error;
320 		}
321 
322 		if (zpool_prop_readonly(prop)) {
323 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
324 			    "is readonly"), propname);
325 			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
326 			goto error;
327 		}
328 
329 		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
330 		    &strval, &intval, errbuf) != 0)
331 			goto error;
332 
333 		/*
334 		 * Perform additional checking for specific properties.
335 		 */
336 		switch (prop) {
337 		case ZPOOL_PROP_VERSION:
338 			if (intval < version || intval > SPA_VERSION) {
339 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
340 				    "property '%s' number %llu is invalid."),
341 				    propname, (u_longlong_t)intval);
342 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
343 				goto error;
344 			}
345 			break;
346 
347 		case ZPOOL_PROP_BOOTFS:
348 			if (create_or_import) {
349 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
350 				    "property '%s' cannot be set at creation "
351 				    "or import time"), propname);
352 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
353 				goto error;
354 			}
355 
356 			if (version < SPA_VERSION_BOOTFS) {
357 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
358 				    "pool must be upgraded to support "
359 				    "'%s' property"), propname);
360 				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
361 				goto error;
362 			}
363 
364 			/*
365 			 * The bootfs property value has to be a dataset name,
366 			 * and the dataset has to be in the pool it is set on.
367 			 */
368 			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
369 			    strval)) {
370 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
371 				    "is an invalid name"), strval);
372 				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
373 				goto error;
374 			}
375 			break;
376 
377 		case ZPOOL_PROP_TEMPORARY:
378 			if (!create_or_import) {
379 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
380 				    "property '%s' can only be set during pool "
381 				    "creation or import"), propname);
382 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
383 				goto error;
384 			}
385 			temp = intval;
386 			break;
387 
388 		case ZPOOL_PROP_ALTROOT:
389 			if (!create_or_import) {
390 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
391 				    "property '%s' can only be set during pool "
392 				    "creation or import"), propname);
393 				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
394 				goto error;
395 			}
396 
397 			if (strval[0] != '/') {
398 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
399 				    "bad alternate root '%s'"), strval);
400 				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
401 				goto error;
402 			}
403 
404 			has_altroot = B_TRUE;
405 			break;
406 		}
407 	}
408 
409 	if (has_altroot) {
410 		if (temp == 0) {
411 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
412 			    "temporary property must be set to 'on' when "
413 			    "altroot is set"));
414 			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
415 			goto error;
416 
417 		} else if (temp == -1 &&
418 		    nvlist_add_uint64(retprops,
419 		    zpool_prop_to_name(ZPOOL_PROP_TEMPORARY), 1) != 0) {
420 			(void) no_memory(hdl);
421 			goto error;
422 		}
423 	}
424 
425 	return (retprops);
426 error:
427 	nvlist_free(retprops);
428 	return (NULL);
429 }
430 
431 /*
432  * Set a zpool property: propname=propval.
433  */
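/*
 * E.g. (illustrative only; dataset name hypothetical, errors are
 * reported through the pool's libzfs handle):
 *
 *	ret = zpool_set_prop(zhp, "bootfs", "tank/root");
 */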
434 int
435 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
436 {
437 	zfs_cmd_t zc = { 0 };
438 	int ret = -1;
439 	char errbuf[1024];
440 	nvlist_t *nvl = NULL;
441 	nvlist_t *realprops;
442 	uint64_t version;
443 
444 	(void) snprintf(errbuf, sizeof (errbuf),
445 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
446 	    zhp->zpool_name);
447 
448 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
449 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
450 
451 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
452 		return (no_memory(zhp->zpool_hdl));
453 
454 	if (nvlist_add_string(nvl, propname, propval) != 0) {
455 		nvlist_free(nvl);
456 		return (no_memory(zhp->zpool_hdl));
457 	}
458 
459 	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
460 	if ((realprops = zpool_validate_properties(zhp->zpool_hdl,
461 	    zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) {
462 		nvlist_free(nvl);
463 		return (-1);
464 	}
465 
466 	nvlist_free(nvl);
467 	nvl = realprops;
468 
469 	/*
470 	 * Execute the corresponding ioctl() to set this property.
471 	 */
472 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
473 
474 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
475 		nvlist_free(nvl);
476 		return (-1);
477 	}
478 
479 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
480 
481 	zcmd_free_nvlists(&zc);
482 	nvlist_free(nvl);
483 
484 	if (ret)
485 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
486 	else
487 		(void) zpool_props_refresh(zhp);
488 
489 	return (ret);
490 }
491 
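/*
 * Expand the given property list to cover all pool properties, and widen
 * each non-fixed column to fit the current value of its property.
 */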
492 int
493 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
494 {
495 	libzfs_handle_t *hdl = zhp->zpool_hdl;
496 	zprop_list_t *entry;
497 	char buf[ZFS_MAXPROPLEN];
498 
499 	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
500 		return (-1);
501 
502 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
503 
504 		if (entry->pl_fixed)
505 			continue;
506 
507 		if (entry->pl_prop != ZPROP_INVAL &&
508 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
509 		    NULL) == 0) {
510 			if (strlen(buf) > entry->pl_width)
511 				entry->pl_width = strlen(buf);
512 		}
513 	}
514 
515 	return (0);
516 }
517 
519 /*
520  * Validate the given pool name.  On failure, an extended error message is
521  * recorded via 'hdl' (when non-NULL).
522  */
523 static boolean_t
524 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
525 {
526 	namecheck_err_t why;
527 	char what;
528 	int ret;
529 
530 	ret = pool_namecheck(pool, &why, &what);
531 
532 	/*
533 	 * The rules for reserved pool names were extended at a later point.
534 	 * But we need to support users with existing pools that may now be
535 	 * invalid.  So we only check for this expanded set of names during a
536 	 * create (or import), and only in userland.
537 	 */
538 	if (ret == 0 && !isopen &&
539 	    (strncmp(pool, "mirror", 6) == 0 ||
540 	    strncmp(pool, "raidz", 5) == 0 ||
541 	    strncmp(pool, "spare", 5) == 0 ||
542 	    strcmp(pool, "log") == 0)) {
543 		zfs_error_aux(hdl,
544 		    dgettext(TEXT_DOMAIN, "name is reserved"));
545 		return (B_FALSE);
546 	}
547 
549 	if (ret != 0) {
550 		if (hdl != NULL) {
551 			switch (why) {
552 			case NAME_ERR_TOOLONG:
553 				zfs_error_aux(hdl,
554 				    dgettext(TEXT_DOMAIN, "name is too long"));
555 				break;
556 
557 			case NAME_ERR_INVALCHAR:
558 				zfs_error_aux(hdl,
559 				    dgettext(TEXT_DOMAIN, "invalid character "
560 				    "'%c' in pool name"), what);
561 				break;
562 
563 			case NAME_ERR_NOLETTER:
564 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
565 				    "name must begin with a letter"));
566 				break;
567 
568 			case NAME_ERR_RESERVED:
569 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
570 				    "name is reserved"));
571 				break;
572 
573 			case NAME_ERR_DISKLIKE:
574 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
575 				    "pool name is reserved"));
576 				break;
577 
578 			case NAME_ERR_LEADING_SLASH:
579 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
580 				    "leading slash in name"));
581 				break;
582 
583 			case NAME_ERR_EMPTY_COMPONENT:
584 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
585 				    "empty component in name"));
586 				break;
587 
588 			case NAME_ERR_TRAILING_SLASH:
589 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
590 				    "trailing slash in name"));
591 				break;
592 
593 			case NAME_ERR_MULTIPLE_AT:
594 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
595 				    "multiple '@' delimiters in name"));
596 				break;
597 
598 			}
599 		}
600 		return (B_FALSE);
601 	}
602 
603 	return (B_TRUE);
604 }
605 
606 /*
607  * Open a handle to the given pool, even if the pool is currently in the FAULTED
608  * state.
609  */
610 zpool_handle_t *
611 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
612 {
613 	zpool_handle_t *zhp;
614 	boolean_t missing;
615 
616 	/*
617 	 * Make sure the pool name is valid.
618 	 */
619 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
620 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
621 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
622 		    pool);
623 		return (NULL);
624 	}
625 
626 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
627 		return (NULL);
628 
629 	zhp->zpool_hdl = hdl;
630 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
631 
632 	if (zpool_refresh_stats(zhp, &missing) != 0) {
633 		zpool_close(zhp);
634 		return (NULL);
635 	}
636 
637 	if (missing) {
638 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
639 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
640 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
641 		zpool_close(zhp);
642 		return (NULL);
643 	}
644 
645 	return (zhp);
646 }
647 
648 /*
649  * Like the above, but silent on error.  Used when iterating over pools (because
650  * the configuration cache may be out of date).
651  */
652 int
653 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
654 {
655 	zpool_handle_t *zhp;
656 	boolean_t missing;
657 
658 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
659 		return (-1);
660 
661 	zhp->zpool_hdl = hdl;
662 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
663 
664 	if (zpool_refresh_stats(zhp, &missing) != 0) {
665 		zpool_close(zhp);
666 		return (-1);
667 	}
668 
669 	if (missing) {
670 		zpool_close(zhp);
671 		*ret = NULL;
672 		return (0);
673 	}
674 
675 	*ret = zhp;
676 	return (0);
677 }
678 
679 /*
680  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
681  * state.
682  */
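/*
 * Typical open/close lifecycle (sketch; pool name hypothetical):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		... operate on the pool ...
 *		zpool_close(zhp);
 *	}
 */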
683 zpool_handle_t *
684 zpool_open(libzfs_handle_t *hdl, const char *pool)
685 {
686 	zpool_handle_t *zhp;
687 
688 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
689 		return (NULL);
690 
691 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
692 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
693 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
694 		zpool_close(zhp);
695 		return (NULL);
696 	}
697 
698 	return (zhp);
699 }
700 
701 /*
702  * Close the handle.  Simply frees the memory associated with the handle.
703  */
704 void
705 zpool_close(zpool_handle_t *zhp)
706 {
707 	if (zhp->zpool_config)
708 		nvlist_free(zhp->zpool_config);
709 	if (zhp->zpool_old_config)
710 		nvlist_free(zhp->zpool_old_config);
711 	if (zhp->zpool_props)
712 		nvlist_free(zhp->zpool_props);
713 	free(zhp);
714 }
715 
716 /*
717  * Return the name of the pool.
718  */
719 const char *
720 zpool_get_name(zpool_handle_t *zhp)
721 {
722 	return (zhp->zpool_name);
723 }
724 
725 
726 /*
727  * Return the state of the pool (ACTIVE or UNAVAILABLE)
728  */
729 int
730 zpool_get_state(zpool_handle_t *zhp)
731 {
732 	return (zhp->zpool_state);
733 }
734 
735 /*
736  * Create the named pool, using the provided vdev list.  It is assumed
737  * that the consumer has already validated the contents of the nvlist, so we
738  * don't have to worry about error semantics.
739  */
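/*
 * A minimal sketch of building 'nvroot' for a single-disk pool (device
 * path and pool name are hypothetical; error handling is elided):
 *
 *	nvlist_t *disk, *nvroot;
 *
 *	(void) nvlist_alloc(&disk, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
 *	(void) nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t0d0s0");
 *	(void) nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0);
 *	(void) nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
 *	(void) nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1);
 *	(void) zpool_create(hdl, "tank", nvroot, NULL);
 */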
740 int
741 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
742     nvlist_t *props)
743 {
744 	zfs_cmd_t zc = { 0 };
745 	char msg[1024];
746 	char *altroot;
747 
748 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
749 	    "cannot create '%s'"), pool);
750 
751 	if (!zpool_name_valid(hdl, B_FALSE, pool))
752 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
753 
754 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
755 		return (-1);
756 
757 	if (props && (props = zpool_validate_properties(hdl, pool, props,
758 	    SPA_VERSION_1, B_TRUE, msg)) == NULL)
759 		return (-1);
760 
761 	if (props && zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
762 		nvlist_free(props);
763 		return (-1);
764 	}
765 
766 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
767 
768 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc) != 0) {
769 
770 		zcmd_free_nvlists(&zc);
771 		nvlist_free(props);
772 
773 		switch (errno) {
774 		case EBUSY:
775 			/*
776 			 * This can happen if the user has specified the same
777 			 * device multiple times.  We can't reliably detect this
778 			 * until we try to add it and see we already have a
779 			 * label.
780 			 */
781 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
782 			    "one or more vdevs refer to the same device"));
783 			return (zfs_error(hdl, EZFS_BADDEV, msg));
784 
785 		case EOVERFLOW:
786 			/*
787 			 * This occurs when one of the devices is below
788 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
789 			 * device was the problem device since there's no
790 			 * reliable way to determine device size from userland.
791 			 */
792 			{
793 				char buf[64];
794 
795 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
796 
797 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
798 				    "one or more devices is less than the "
799 				    "minimum size (%s)"), buf);
800 			}
801 			return (zfs_error(hdl, EZFS_BADDEV, msg));
802 
803 		case ENOSPC:
804 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
805 			    "one or more devices is out of space"));
806 			return (zfs_error(hdl, EZFS_BADDEV, msg));
807 
808 		default:
809 			return (zpool_standard_error(hdl, errno, msg));
810 		}
811 	}
812 
813 	/*
814 	 * If this is an alternate root pool, then we automatically set the
815 	 * mountpoint of the root dataset to be '/'.
816 	 */
817 	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
818 	    &altroot) == 0) {
819 		zfs_handle_t *zhp;
820 
821 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
822 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
823 		    "/") == 0);
824 
825 		zfs_close(zhp);
826 	}
827 
828 	zcmd_free_nvlists(&zc);
829 	nvlist_free(props);
830 	return (0);
831 }
832 
833 /*
834  * Destroy the given pool.  It is up to the caller to ensure that there are no
835  * datasets left in the pool.
836  */
837 int
838 zpool_destroy(zpool_handle_t *zhp)
839 {
840 	zfs_cmd_t zc = { 0 };
841 	zfs_handle_t *zfp = NULL;
842 	libzfs_handle_t *hdl = zhp->zpool_hdl;
843 	char msg[1024];
844 
845 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
846 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
847 	    ZFS_TYPE_FILESYSTEM)) == NULL)
848 		return (-1);
849 
850 	if (zpool_remove_zvol_links(zhp) != 0)
851 		return (-1);
852 
853 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
854 
855 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
856 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
857 		    "cannot destroy '%s'"), zhp->zpool_name);
858 
859 		if (errno == EROFS) {
860 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
861 			    "one or more devices is read only"));
862 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
863 		} else {
864 			(void) zpool_standard_error(hdl, errno, msg);
865 		}
866 
867 		if (zfp)
868 			zfs_close(zfp);
869 		return (-1);
870 	}
871 
872 	if (zfp) {
873 		remove_mountpoint(zfp);
874 		zfs_close(zfp);
875 	}
876 
877 	return (0);
878 }
879 
880 /*
881  * Add the given vdevs to the pool.  The caller must have already performed the
882  * necessary verification to ensure that the vdev specification is well-formed.
883  */
884 int
885 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
886 {
887 	zfs_cmd_t zc = { 0 };
888 	int ret;
889 	libzfs_handle_t *hdl = zhp->zpool_hdl;
890 	char msg[1024];
891 	nvlist_t **spares;
892 	uint_t nspares;
893 
894 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
895 	    "cannot add to '%s'"), zhp->zpool_name);
896 
897 	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL)
898 	    < SPA_VERSION_SPARES &&
899 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
900 	    &spares, &nspares) == 0) {
901 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
902 		    "upgraded to add hot spares"));
903 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
904 	}
905 
906 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
907 		return (-1);
908 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
909 
910 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
911 		switch (errno) {
912 		case EBUSY:
913 			/*
914 			 * This can happen if the user has specified the same
915 			 * device multiple times.  We can't reliably detect this
916 			 * until we try to add it and see we already have a
917 			 * label.
918 			 */
919 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
920 			    "one or more vdevs refer to the same device"));
921 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
922 			break;
923 
924 		case EOVERFLOW:
925 			/*
926 			 * This occurs when one of the devices is below
927 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
928 			 * device was the problem device since there's no
929 			 * reliable way to determine device size from userland.
930 			 */
931 			{
932 				char buf[64];
933 
934 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
935 
936 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
937 				    "device is less than the minimum "
938 				    "size (%s)"), buf);
939 			}
940 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
941 			break;
942 
943 		case ENOTSUP:
944 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
945 			    "pool must be upgraded to add these vdevs"));
946 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
947 			break;
948 
949 		case EDOM:
950 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
951 			    "root pool cannot have multiple vdevs"
952 			    " or separate logs"));
953 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
954 			break;
955 
956 		default:
957 			(void) zpool_standard_error(hdl, errno, msg);
958 		}
959 
960 		ret = -1;
961 	} else {
962 		ret = 0;
963 	}
964 
965 	zcmd_free_nvlists(&zc);
966 
967 	return (ret);
968 }
969 
970 /*
971  * Export the pool from the system.  The caller must ensure that there are no
972  * mounted datasets in the pool.
973  */
974 int
975 zpool_export(zpool_handle_t *zhp)
976 {
977 	zfs_cmd_t zc = { 0 };
978 
979 	if (zpool_remove_zvol_links(zhp) != 0)
980 		return (-1);
981 
982 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
983 
984 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0)
985 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
986 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
987 		    zhp->zpool_name));
988 	return (0);
989 }
990 
991 /*
992  * zpool_import() is a contracted interface.  It should be kept the same
993  * if possible.
994  *
995  * Applications should use zpool_import_props() to import a pool with
996  * new property values to be set.
997  */
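/*
 * Caller sketch (assumes zpool_find_import() as declared in libzfs.h;
 * error handling elided):
 *
 *	nvlist_t *pools, *config;
 *	nvpair_t *elem = NULL;
 *
 *	pools = zpool_find_import(hdl, 0, NULL);
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		verify(nvpair_value_nvlist(elem, &config) == 0);
 *		(void) zpool_import(hdl, config, NULL, NULL);
 *	}
 */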
998 int
999 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1000     char *altroot)
1001 {
1002 	nvlist_t *props = NULL;
1003 	int ret;
1004 
1005 	if (altroot != NULL) {
1006 		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1007 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1008 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1009 			    newname));
1010 		}
1011 
1012 		if (nvlist_add_string(props,
1013 		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0) {
1014 			nvlist_free(props);
1015 			return (zfs_error_fmt(hdl, EZFS_NOMEM,
1016 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1017 			    newname));
1018 		}
1019 	}
1020 
1021 	ret = zpool_import_props(hdl, config, newname, props);
1022 	if (props)
1023 		nvlist_free(props);
1024 	return (ret);
1025 }
1026 
1027 /*
1028  * Import the given pool using the known configuration and a list of
1029  * properties to be set. The configuration should have come from
1030  * zpool_find_import().  The 'newname' parameter controls whether the pool
1031  * is imported with a different name.
1032  */
1033 int
1034 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1035     nvlist_t *props)
1036 {
1037 	zfs_cmd_t zc = { 0 };
1038 	char *thename;
1039 	char *origname;
1040 	int ret;
1041 	char errbuf[1024];
1042 
1043 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1044 	    &origname) == 0);
1045 
1046 	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1047 	    "cannot import pool '%s'"), origname);
1048 
1049 	if (newname != NULL) {
1050 		if (!zpool_name_valid(hdl, B_FALSE, newname))
1051 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1052 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1053 			    newname));
1054 		thename = (char *)newname;
1055 	} else {
1056 		thename = origname;
1057 	}
1058 
1059 	if (props) {
1060 		uint64_t version;
1061 
1062 		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1063 		    &version) == 0);
1064 
1065 		if ((props = zpool_validate_properties(hdl, origname,
1066 		    props, version, B_TRUE, errbuf)) == NULL) {
1067 			return (-1);
1068 		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1069 			nvlist_free(props);
1070 			return (-1);
1071 		}
1072 	}
1073 
1074 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1075 
1076 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1077 	    &zc.zc_guid) == 0);
1078 
1079 	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1080 		nvlist_free(props);
1081 		return (-1);
1082 	}
1083 
1084 	ret = 0;
1085 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
1086 		char desc[1024];
1087 		if (newname == NULL)
1088 			(void) snprintf(desc, sizeof (desc),
1089 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1090 			    thename);
1091 		else
1092 			(void) snprintf(desc, sizeof (desc),
1093 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1094 			    origname, thename);
1095 
1096 		switch (errno) {
1097 		case ENOTSUP:
1098 			/*
1099 			 * Unsupported version.
1100 			 */
1101 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
1102 			break;
1103 
1104 		case EINVAL:
1105 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1106 			break;
1107 
1108 		default:
1109 			(void) zpool_standard_error(hdl, errno, desc);
1110 		}
1111 
1112 		ret = -1;
1113 	} else {
1114 		zpool_handle_t *zhp;
1115 
1116 		/*
1117 		 * This should never fail, but play it safe anyway.
1118 		 */
1119 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
1120 			ret = -1;
1121 		} else if (zhp != NULL) {
1122 			ret = zpool_create_zvol_links(zhp);
1123 			zpool_close(zhp);
1124 		}
1125 
1126 	}
1127 
1128 	zcmd_free_nvlists(&zc);
1129 	nvlist_free(props);
1130 
1131 	return (ret);
1132 }
1133 
1134 /*
1135  * Scrub the pool.
1136  */
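/*
 * E.g. (sketch): start a full scrub, or cancel an active one:
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);
 *	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);
 */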
1137 int
1138 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
1139 {
1140 	zfs_cmd_t zc = { 0 };
1141 	char msg[1024];
1142 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1143 
1144 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1145 	zc.zc_cookie = type;
1146 
1147 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCRUB, &zc) == 0)
1148 		return (0);
1149 
1150 	(void) snprintf(msg, sizeof (msg),
1151 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1152 
1153 	if (errno == EBUSY)
1154 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
1155 	else
1156 		return (zpool_standard_error(hdl, errno, msg));
1157 }
1158 
1159 /*
1160  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1161  * spare; but FALSE if its an INUSE spare.
1162  */
1163 static nvlist_t *
1164 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
1165     boolean_t *avail_spare)
1166 {
1167 	uint_t c, children;
1168 	nvlist_t **child;
1169 	uint64_t theguid, present;
1170 	char *path;
1171 	uint64_t wholedisk = 0;
1172 	nvlist_t *ret;
1173 
1174 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
1175 
1176 	if (search == NULL &&
1177 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
1178 		/*
1179 		 * If the device has never been present since import, the only
1180 		 * reliable way to match the vdev is by GUID.
1181 		 */
1182 		if (theguid == guid)
1183 			return (nv);
1184 	} else if (search != NULL &&
1185 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1186 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1187 		    &wholedisk);
1188 		if (wholedisk) {
1189 			/*
1190 			 * For whole disks, the internal path has 's0', but the
1191 			 * path passed in by the user doesn't.
1192 			 */
1193 			if (strlen(search) == strlen(path) - 2 &&
1194 			    strncmp(search, path, strlen(search)) == 0)
1195 				return (nv);
1196 		} else if (strcmp(search, path) == 0) {
1197 			return (nv);
1198 		}
1199 	}
1200 
1201 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1202 	    &child, &children) != 0)
1203 		return (NULL);
1204 
1205 	for (c = 0; c < children; c++)
1206 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1207 		    avail_spare)) != NULL)
1208 			return (ret);
1209 
1210 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
1211 	    &child, &children) == 0) {
1212 		for (c = 0; c < children; c++) {
1213 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
1214 			    avail_spare)) != NULL) {
1215 				*avail_spare = B_TRUE;
1216 				return (ret);
1217 			}
1218 		}
1219 	}
1220 
1221 	return (NULL);
1222 }
1223 
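/*
 * Given a device path or a printed guid, locate the corresponding vdev
 * in the pool configuration, setting 'avail_spare' as described above.
 */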
1224 nvlist_t *
1225 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
1226 {
1227 	char buf[MAXPATHLEN];
1228 	const char *search;
1229 	char *end;
1230 	nvlist_t *nvroot;
1231 	uint64_t guid;
1232 
1233 	guid = strtoull(path, &end, 10);
1234 	if (guid != 0 && *end == '\0') {
1235 		search = NULL;
1236 	} else if (path[0] != '/') {
1237 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
1238 		search = buf;
1239 	} else {
1240 		search = path;
1241 	}
1242 
1243 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1244 	    &nvroot) == 0);
1245 
1246 	*avail_spare = B_FALSE;
1247 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
1248 }
1249 
1250 /*
1251  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
1252  */
1253 static boolean_t
1254 is_spare(zpool_handle_t *zhp, uint64_t guid)
1255 {
1256 	uint64_t spare_guid;
1257 	nvlist_t *nvroot;
1258 	nvlist_t **spares;
1259 	uint_t nspares;
1260 	int i;
1261 
1262 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1263 	    &nvroot) == 0);
1264 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1265 	    &spares, &nspares) == 0) {
1266 		for (i = 0; i < nspares; i++) {
1267 			verify(nvlist_lookup_uint64(spares[i],
1268 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
1269 			if (guid == spare_guid)
1270 				return (B_TRUE);
1271 		}
1272 	}
1273 
1274 	return (B_FALSE);
1275 }
1276 
1277 /*
1278  * Bring the specified vdev online.  The 'flags' parameter is a set of the
1279  * ZFS_ONLINE_* flags.
1280  */
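/*
 * E.g. (sketch; device name hypothetical, no flags passed):
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c1t0d0", 0, &newstate) == 0 &&
 *	    newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device is healthy again\n");
 */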
1281 int
1282 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
1283     vdev_state_t *newstate)
1284 {
1285 	zfs_cmd_t zc = { 0 };
1286 	char msg[1024];
1287 	nvlist_t *tgt;
1288 	boolean_t avail_spare;
1289 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1290 
1291 	(void) snprintf(msg, sizeof (msg),
1292 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
1293 
1294 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1295 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1296 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1297 
1298 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1299 
1300 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
1301 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1302 
1303 	zc.zc_cookie = VDEV_STATE_ONLINE;
1304 	zc.zc_obj = flags;
1305 
1307 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
1308 		return (zpool_standard_error(hdl, errno, msg));
1309 
1310 	*newstate = zc.zc_cookie;
1311 	return (0);
1312 }
1313 
1314 /*
1315  * Take the specified vdev offline.
1316  */
1317 int
1318 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
1319 {
1320 	zfs_cmd_t zc = { 0 };
1321 	char msg[1024];
1322 	nvlist_t *tgt;
1323 	boolean_t avail_spare;
1324 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1325 
1326 	(void) snprintf(msg, sizeof (msg),
1327 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
1328 
1329 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1330 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1331 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1332 
1333 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1334 
1335 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
1336 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1337 
1338 	zc.zc_cookie = VDEV_STATE_OFFLINE;
1339 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
1340 
1341 	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1342 		return (0);
1343 
1344 	switch (errno) {
1345 	case EBUSY:
1346 
1347 		/*
1348 		 * There are no other replicas of this device.
1349 		 */
1350 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1351 
1352 	default:
1353 		return (zpool_standard_error(hdl, errno, msg));
1354 	}
1355 }
1356 
1357 /*
1358  * Mark the given vdev faulted.
1359  */
1360 int
1361 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
1362 {
1363 	zfs_cmd_t zc = { 0 };
1364 	char msg[1024];
1365 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1366 
1367 	(void) snprintf(msg, sizeof (msg),
1368 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
1369 
1370 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1371 	zc.zc_guid = guid;
1372 	zc.zc_cookie = VDEV_STATE_FAULTED;
1373 
1374 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1375 		return (0);
1376 
1377 	switch (errno) {
1378 	case EBUSY:
1379 
1380 		/*
1381 		 * There are no other replicas of this device.
1382 		 */
1383 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
1384 
1385 	default:
1386 		return (zpool_standard_error(hdl, errno, msg));
1387 	}
1389 }
1390 
1391 /*
1392  * Mark the given vdev degraded.
1393  */
1394 int
1395 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
1396 {
1397 	zfs_cmd_t zc = { 0 };
1398 	char msg[1024];
1399 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1400 
1401 	(void) snprintf(msg, sizeof (msg),
1402 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
1403 
1404 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1405 	zc.zc_guid = guid;
1406 	zc.zc_cookie = VDEV_STATE_DEGRADED;
1407 
1408 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
1409 		return (0);
1410 
1411 	return (zpool_standard_error(hdl, errno, msg));
1412 }
1413 
1414 /*
1415  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1416  * a hot spare.
1417  */
1418 static boolean_t
1419 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1420 {
1421 	nvlist_t **child;
1422 	uint_t c, children;
1423 	char *type;
1424 
1425 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1426 	    &children) == 0) {
1427 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1428 		    &type) == 0);
1429 
1430 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1431 		    children == 2 && child[which] == tgt)
1432 			return (B_TRUE);
1433 
1434 		for (c = 0; c < children; c++)
1435 			if (is_replacing_spare(child[c], tgt, which))
1436 				return (B_TRUE);
1437 	}
1438 
1439 	return (B_FALSE);
1440 }
1441 
1442 /*
1443  * Attach new_disk (fully described by nvroot) to old_disk.
1444  * If 'replacing' is specified, the new disk will replace the old one.
1445  */
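/*
 * Sketch: 'nvroot' is built the same way as for zpool_create() (a root
 * vdev with a single disk child describing new_disk).  E.g., replacing
 * a failed disk (hypothetical device names):
 *
 *	(void) zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1);
 */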
1446 int
1447 zpool_vdev_attach(zpool_handle_t *zhp,
1448     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1449 {
1450 	zfs_cmd_t zc = { 0 };
1451 	char msg[1024];
1452 	int ret;
1453 	nvlist_t *tgt;
1454 	boolean_t avail_spare;
1455 	uint64_t val, is_log;
1456 	char *path;
1457 	nvlist_t **child;
1458 	uint_t children;
1459 	nvlist_t *config_root;
1460 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1461 
1462 	if (replacing)
1463 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1464 		    "cannot replace %s with %s"), old_disk, new_disk);
1465 	else
1466 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1467 		    "cannot attach %s to %s"), new_disk, old_disk);
1468 
1469 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1470 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == NULL)
1471 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1472 
1473 	if (avail_spare)
1474 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1475 
1476 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1477 	zc.zc_cookie = replacing;
1478 
1479 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1480 	    &child, &children) != 0 || children != 1) {
1481 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1482 		    "new device must be a single disk"));
1483 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1484 	}
1485 
1486 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1487 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1488 
1489 	/*
1490 	 * If the target is a hot spare that has been swapped in, we can only
1491 	 * replace it with another hot spare.
1492 	 */
1493 	if (replacing &&
1494 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1495 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1496 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1497 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1498 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1499 		    "can only be replaced by another hot spare"));
1500 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1501 	}
1502 
1503 	/*
1504 	 * If we are attempting to replace a spare, it cannot be applied to an
1505 	 * already spared device.
1506 	 */
1507 	if (replacing &&
1508 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1509 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1510 	    is_replacing_spare(config_root, tgt, 0)) {
1511 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1512 		    "device has already been replaced with a spare"));
1513 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1514 	}
1515 
1516 	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1517 		return (-1);
1518 
1519 	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc);
1520 
1521 	zcmd_free_nvlists(&zc);
1522 
1523 	if (ret == 0)
1524 		return (0);
1525 
1526 	switch (errno) {
1527 	case ENOTSUP:
1528 		/*
1529 		 * Can't attach to or replace this type of vdev.
1530 		 */
1531 		if (replacing) {
1532 			is_log = B_FALSE;
1533 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1534 			    &is_log);
1535 			if (is_log)
1536 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1537 				    "cannot replace a log with a spare"));
1538 			else
1539 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1540 				    "cannot replace a replacing device"));
1541 		} else {
1542 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1543 			    "can only attach to mirrors and top-level "
1544 			    "disks"));
1545 		}
1546 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1547 		break;
1548 
1549 	case EINVAL:
1550 		/*
1551 		 * The new device must be a single disk.
1552 		 */
1553 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1554 		    "new device must be a single disk"));
1555 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1556 		break;
1557 
1558 	case EBUSY:
1559 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1560 		    new_disk);
1561 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1562 		break;
1563 
1564 	case EOVERFLOW:
1565 		/*
1566 		 * The new device is too small.
1567 		 */
1568 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1569 		    "device is too small"));
1570 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1571 		break;
1572 
1573 	case EDOM:
1574 		/*
1575 		 * The new device has a different alignment requirement.
1576 		 */
1577 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1578 		    "devices have different sector alignment"));
1579 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1580 		break;
1581 
1582 	case ENAMETOOLONG:
1583 		/*
1584 		 * The resulting top-level vdev spec won't fit in the label.
1585 		 */
1586 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1587 		break;
1588 
1589 	default:
1590 		(void) zpool_standard_error(hdl, errno, msg);
1591 	}
1592 
1593 	return (-1);
1594 }
1595 
1596 /*
1597  * Detach the specified device.
1598  */
1599 int
1600 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1601 {
1602 	zfs_cmd_t zc = { 0 };
1603 	char msg[1024];
1604 	nvlist_t *tgt;
1605 	boolean_t avail_spare;
1606 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1607 
1608 	(void) snprintf(msg, sizeof (msg),
1609 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1610 
1611 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1612 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1613 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1614 
1615 	if (avail_spare)
1616 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1617 
1618 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1619 
1620 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1621 		return (0);
1622 
1623 	switch (errno) {
1624 
1625 	case ENOTSUP:
1626 		/*
1627 		 * Can't detach from this type of vdev.
1628 		 */
1629 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1630 		    "applicable to mirror and replacing vdevs"));
1631 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1632 		break;
1633 
1634 	case EBUSY:
1635 		/*
1636 		 * There are no other replicas of this device.
1637 		 */
1638 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1639 		break;
1640 
1641 	default:
1642 		(void) zpool_standard_error(hdl, errno, msg);
1643 	}
1644 
1645 	return (-1);
1646 }
1647 
1648 /*
1649  * Remove the given device.  Currently, this is supported only for hot spares.
1650  */
1651 int
1652 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1653 {
1654 	zfs_cmd_t zc = { 0 };
1655 	char msg[1024];
1656 	nvlist_t *tgt;
1657 	boolean_t avail_spare;
1658 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1659 
1660 	(void) snprintf(msg, sizeof (msg),
1661 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1662 
1663 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1664 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1665 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1666 
1667 	if (!avail_spare) {
1668 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1669 		    "only inactive hot spares can be removed"));
1670 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1671 	}
1672 
1673 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1674 
1675 	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1676 		return (0);
1677 
1678 	return (zpool_standard_error(hdl, errno, msg));
1679 }
1680 
1681 /*
1682  * Clear the errors for the pool, or the particular device if specified.
1683  */
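/*
 * E.g. (sketch): zpool_clear(zhp, NULL) clears error counts for every
 * device in the pool, while zpool_clear(zhp, "c1t0d0") clears only the
 * named (hypothetical) device.
 */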
1684 int
1685 zpool_clear(zpool_handle_t *zhp, const char *path)
1686 {
1687 	zfs_cmd_t zc = { 0 };
1688 	char msg[1024];
1689 	nvlist_t *tgt;
1690 	boolean_t avail_spare;
1691 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1692 
1693 	if (path)
1694 		(void) snprintf(msg, sizeof (msg),
1695 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1696 		    path);
1697 	else
1698 		(void) snprintf(msg, sizeof (msg),
1699 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1700 		    zhp->zpool_name);
1701 
1702 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1703 	if (path) {
1704 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1705 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1706 
1707 		if (avail_spare)
1708 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1709 
1710 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1711 		    &zc.zc_guid) == 0);
1712 	}
1713 
1714 	if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
1715 		return (0);
1716 
1717 	return (zpool_standard_error(hdl, errno, msg));
1718 }
1719 
1720 /*
1721  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1722  */
1723 int
1724 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1725 {
1726 	zfs_cmd_t zc = { 0 };
1727 	char msg[1024];
1728 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1729 
1730 	(void) snprintf(msg, sizeof (msg),
1731 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1732 	    guid);
1733 
1734 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1735 	zc.zc_guid = guid;
1736 
1737 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1738 		return (0);
1739 
1740 	return (zpool_standard_error(hdl, errno, msg));
1741 }
1742 
1743 /*
1744  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1745  * hierarchy.
1746  */
1747 int
1748 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1749     void *data)
1750 {
1751 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1752 	char (*paths)[MAXPATHLEN];
1753 	size_t size = 4;
1754 	int curr, fd, base, ret = 0;
1755 	DIR *dirp;
1756 	struct dirent *dp;
1757 	struct stat st;
1758 
1759 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1760 		return (errno == ENOENT ? 0 : -1);
1761 
1762 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1763 		int err = errno;
1764 		(void) close(base);
1765 		return (err == ENOENT ? 0 : -1);
1766 	}
1767 
1768 	/*
1769 	 * Oddly this wasn't a directory -- ignore that failure since we
1770 	 * know there are no links lower in the (non-existent) hierarchy.
1771 	 */
1772 	if (!S_ISDIR(st.st_mode)) {
1773 		(void) close(base);
1774 		return (0);
1775 	}
1776 
1777 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1778 		(void) close(base);
1779 		return (-1);
1780 	}
1781 
1782 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1783 	curr = 0;
1784 
1785 	while (curr >= 0) {
1786 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1787 			goto err;
1788 
1789 		if (S_ISDIR(st.st_mode)) {
1790 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1791 				goto err;
1792 
1793 			if ((dirp = fdopendir(fd)) == NULL) {
1794 				(void) close(fd);
1795 				goto err;
1796 			}
1797 
1798 			while ((dp = readdir(dirp)) != NULL) {
1799 				if (dp->d_name[0] == '.')
1800 					continue;
1801 
1802 				if (curr + 1 == size) {
1803 					paths = zfs_realloc(hdl, paths,
1804 					    size * sizeof (paths[0]),
1805 					    size * 2 * sizeof (paths[0]));
1806 					if (paths == NULL) {
1807 						(void) closedir(dirp);
1808 						(void) close(fd);
1809 						goto err;
1810 					}
1811 
1812 					size *= 2;
1813 				}
1814 
1815 				(void) strlcpy(paths[curr + 1], paths[curr],
1816 				    sizeof (paths[curr + 1]));
1817 				(void) strlcat(paths[curr], "/",
1818 				    sizeof (paths[curr]));
1819 				(void) strlcat(paths[curr], dp->d_name,
1820 				    sizeof (paths[curr]));
1821 				curr++;
1822 			}
1823 
1824 			(void) closedir(dirp);
1825 
1826 		} else {
1827 			if ((ret = cb(paths[curr], data)) != 0)
1828 				break;
1829 		}
1830 
1831 		curr--;
1832 	}
1833 
1834 	free(paths);
1835 	(void) close(base);
1836 
1837 	return (ret);
1838 
1839 err:
1840 	free(paths);
1841 	(void) close(base);
1842 	return (-1);
1843 }
1844 
1845 typedef struct zvol_cb {
1846 	zpool_handle_t *zcb_pool;
1847 	boolean_t zcb_create;
1848 } zvol_cb_t;
1849 
1850 /*ARGSUSED*/
1851 static int
1852 do_zvol_create(zfs_handle_t *zhp, void *data)
1853 {
1854 	int ret = 0;
1855 
1856 	if (ZFS_IS_VOLUME(zhp)) {
1857 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1858 		ret = zfs_iter_snapshots(zhp, do_zvol_create, NULL);
1859 	}
1860 
1861 	if (ret == 0)
1862 		ret = zfs_iter_filesystems(zhp, do_zvol_create, NULL);
1863 
1864 	zfs_close(zhp);
1865 
1866 	return (ret);
1867 }
1868 
1869 /*
1870  * Iterate over all zvols in the pool and make any necessary minor nodes.
1871  */
1872 int
1873 zpool_create_zvol_links(zpool_handle_t *zhp)
1874 {
1875 	zfs_handle_t *zfp;
1876 	int ret;
1877 
1878 	/*
1879 	 * If the pool is unavailable, just return success.
1880 	 */
1881 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1882 	    zhp->zpool_name)) == NULL)
1883 		return (0);
1884 
1885 	ret = zfs_iter_filesystems(zfp, do_zvol_create, NULL);
1886 
1887 	zfs_close(zfp);
1888 	return (ret);
1889 }
1890 
1891 static int
1892 do_zvol_remove(const char *dataset, void *data)
1893 {
1894 	zpool_handle_t *zhp = data;
1895 
1896 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1897 }
1898 
1899 /*
1900  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1901  * by examining the /dev links so that a corrupted pool doesn't impede this
1902  * operation.
1903  */
1904 int
1905 zpool_remove_zvol_links(zpool_handle_t *zhp)
1906 {
1907 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1908 }
1909 
1910 /*
1911  * Convert from a devid string to a path.
1912  */
1913 static char *
1914 devid_to_path(char *devid_str)
1915 {
1916 	ddi_devid_t devid;
1917 	char *minor;
1918 	char *path;
1919 	devid_nmlist_t *list = NULL;
1920 	int ret;
1921 
1922 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1923 		return (NULL);
1924 
1925 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1926 
1927 	devid_str_free(minor);
1928 	devid_free(devid);
1929 
1930 	if (ret != 0)
1931 		return (NULL);
1932 
1933 	if ((path = strdup(list[0].devname)) == NULL)
1934 		return (NULL);
1935 
1936 	devid_free_nmlist(list);
1937 
1938 	return (path);
1939 }
1940 
1941 /*
1942  * Convert from a path to a devid string.
1943  */
1944 static char *
1945 path_to_devid(const char *path)
1946 {
1947 	int fd;
1948 	ddi_devid_t devid;
1949 	char *minor, *ret;
1950 
1951 	if ((fd = open(path, O_RDONLY)) < 0)
1952 		return (NULL);
1953 
1954 	minor = NULL;
1955 	ret = NULL;
1956 	if (devid_get(fd, &devid) == 0) {
1957 		if (devid_get_minor_name(fd, &minor) == 0)
1958 			ret = devid_str_encode(devid, minor);
1959 		if (minor != NULL)
1960 			devid_str_free(minor);
1961 		devid_free(devid);
1962 	}
1963 	(void) close(fd);
1964 
1965 	return (ret);
1966 }
1967 
1968 /*
1969  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1970  * ignore any failure here, since a common case is for an unprivileged user to
1971  * type 'zpool status', and we'll display the correct information anyway.
1972  */
1973 static void
1974 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1975 {
1976 	zfs_cmd_t zc = { 0 };
1977 
1978 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1979 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1980 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1981 	    &zc.zc_guid) == 0);
1982 
1983 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1984 }
1985 
1986 /*
1987  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1988  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1989  * We also check if this is a whole disk, in which case we strip off the
1990  * trailing 's0' slice name.
1991  *
1992  * This routine is also responsible for identifying when disks have been
1993  * reconfigured in a new location.  The kernel will have opened the device by
1994  * devid, but the path will still refer to the old location.  To catch this, we
1995  * first do a path -> devid translation (which is fast for the common case).  If
1996  * the devid matches, we're done.  If not, we do a reverse devid -> path
1997  * translation and issue the appropriate ioctl() to update the path of the vdev.
1998  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1999  * of these checks.
2000  */
2001 char *
2002 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
2003 {
2004 	char *path, *devid;
2005 	uint64_t value;
2006 	char buf[64];
2007 	vdev_stat_t *vs;
2008 	uint_t vsc;
2009 
2010 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
2011 	    &value) == 0) {
2012 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
2013 		    &value) == 0);
2014 		(void) snprintf(buf, sizeof (buf), "%llu",
2015 		    (u_longlong_t)value);
2016 		path = buf;
2017 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
2018 
2019 		/*
2020 		 * If the device is dead (faulted, offline, etc) then don't
2021 		 * bother opening it.  Otherwise we may be forcing the user to
2022 		 * open a misbehaving device, which can have undesirable
2023 		 * effects.
2024 		 */
2025 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
2026 		    (uint64_t **)&vs, &vsc) != 0 ||
2027 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
2028 		    zhp != NULL &&
2029 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
2030 			/*
2031 			 * Determine if the current path is correct.
2032 			 */
2033 			char *newdevid = path_to_devid(path);
2034 
2035 			if (newdevid == NULL ||
2036 			    strcmp(devid, newdevid) != 0) {
2037 				char *newpath;
2038 
2039 				if ((newpath = devid_to_path(devid)) != NULL) {
2040 					/*
2041 					 * Update the path appropriately.
2042 					 */
2043 					set_path(zhp, nv, newpath);
2044 					if (nvlist_add_string(nv,
2045 					    ZPOOL_CONFIG_PATH, newpath) == 0)
2046 						verify(nvlist_lookup_string(nv,
2047 						    ZPOOL_CONFIG_PATH,
2048 						    &path) == 0);
2049 					free(newpath);
2050 				}
2051 			}
2052 
2053 			if (newdevid)
2054 				devid_str_free(newdevid);
2055 		}
2056 
2057 		if (strncmp(path, "/dev/dsk/", 9) == 0)
2058 			path += 9;
2059 
2060 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2061 		    &value) == 0 && value) {
2062 			char *tmp = zfs_strdup(hdl, path);
2063 			if (tmp == NULL)
2064 				return (NULL);
2065 			tmp[strlen(path) - 2] = '\0';
2066 			return (tmp);
2067 		}
2068 	} else {
2069 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
2070 
2071 		/*
2072 		 * If it's a raidz device, we need to stick in the parity level.
2073 		 */
2074 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
2075 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
2076 			    &value) == 0);
2077 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
2078 			    (u_longlong_t)value);
2079 			path = buf;
2080 		}
2081 	}
2082 
2083 	return (zfs_strdup(hdl, path));
2084 }
2085 
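/*
 * Sketch of a zpool_vdev_name() consumer: print the vdev tree roughly the
 * way 'zpool status' lays it out.  Only ZPOOL_CONFIG_CHILDREN and the
 * function above are assumed; the helper itself is hypothetical.
 */
static void
example_print_vdevs(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    int depth)
{
	nvlist_t **child;
	uint_t c, children;
	char *name;

	if ((name = zpool_vdev_name(hdl, zhp, nv)) != NULL) {
		(void) printf("%*s%s\n", depth, "", name);
		free(name);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;
	for (c = 0; c < children; c++)
		example_print_vdevs(hdl, zhp, child[c], depth + 2);
}
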
2086 static int
2087 zbookmark_compare(const void *a, const void *b)
2088 {
2089 	return (memcmp(a, b, sizeof (zbookmark_t)));
2090 }
2091 
2092 /*
2093  * Retrieve the persistent error log, uniquify the members, and return to the
2094  * caller.
2095  */
2096 int
2097 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
2098 {
2099 	zfs_cmd_t zc = { 0 };
2100 	uint64_t count;
2101 	zbookmark_t *zb = NULL;
2102 	int i;
2103 
2104 	/*
2105 	 * Retrieve the raw error list from the kernel.  If the number of errors
2106 	 * has increased, allocate more space and continue until we get the
2107 	 * entire list.
2108 	 */
2109 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
2110 	    &count) == 0);
2111 	if (count == 0)
2112 		return (0);
2113 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
2114 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
2115 		return (-1);
2116 	zc.zc_nvlist_dst_size = count;
2117 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2118 	for (;;) {
2119 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
2120 		    &zc) != 0) {
2121 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
2122 			if (errno == ENOMEM) {
2123 				count = zc.zc_nvlist_dst_size;
2124 				if ((zc.zc_nvlist_dst = (uintptr_t)
2125 				    zfs_alloc(zhp->zpool_hdl, count *
2126 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
2127 					return (-1);
2128 			} else {
2129 				return (-1);
2130 			}
2131 		} else {
2132 			break;
2133 		}
2134 	}
2135 
2136 	/*
2137 	 * Sort the resulting bookmarks.  This is a little confusing due to the
2138 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
2139 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
2140 	 * _not_ copied as part of the process.  So we point the start of our
2141 	 * array appropriately and decrement the total number of elements.
2142 	 */
2143 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
2144 	    zc.zc_nvlist_dst_size;
2145 	count -= zc.zc_nvlist_dst_size;
2146 
2147 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
2148 
2149 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
2150 
2151 	/*
2152 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
2153 	 */
2154 	for (i = 0; i < count; i++) {
2155 		nvlist_t *nv;
2156 
2157 		/* ignoring zb_blkid and zb_level for now */
2158 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
2159 		    zb[i-1].zb_object == zb[i].zb_object)
2160 			continue;
2161 
2162 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
2163 			goto nomem;
2164 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
2165 		    zb[i].zb_objset) != 0) {
2166 			nvlist_free(nv);
2167 			goto nomem;
2168 		}
2169 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
2170 		    zb[i].zb_object) != 0) {
2171 			nvlist_free(nv);
2172 			goto nomem;
2173 		}
2174 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
2175 			nvlist_free(nv);
2176 			goto nomem;
2177 		}
2178 		nvlist_free(nv);
2179 	}
2180 
2181 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2182 	return (0);
2183 
2184 nomem:
2185 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
2186 	return (no_memory(zhp->zpool_hdl));
2187 }
2188 
2189 /*
2190  * Upgrade a ZFS pool to the latest on-disk version.
2191  */
2192 int
2193 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
2194 {
2195 	zfs_cmd_t zc = { 0 };
2196 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2197 
2198 	(void) strcpy(zc.zc_name, zhp->zpool_name);
2199 	zc.zc_cookie = new_version;
2200 
2201 	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
2202 		return (zpool_standard_error_fmt(hdl, errno,
2203 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
2204 		    zhp->zpool_name));
2205 	return (0);
2206 }
2207 
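/*
 * Sketch of an upgrade call as 'zpool upgrade <pool>' would issue it,
 * assuming the SPA_VERSION macro from <sys/fs/zfs.h> names the newest
 * on-disk version.  Hypothetical helper.
 */
static int
example_upgrade_to_latest(zpool_handle_t *zhp)
{
	return (zpool_upgrade(zhp, SPA_VERSION));
}
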
2208 void
2209 zpool_set_history_str(const char *subcommand, int argc, char **argv,
2210     char *history_str)
2211 {
2212 	int i;
2213 
2214 	(void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
2215 	for (i = 1; i < argc; i++) {
2216 		if (strlen(history_str) + 1 + strlen(argv[i]) >
2217 		    HIS_MAX_RECORD_LEN)
2218 			break;
2219 		(void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
2220 		(void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
2221 	}
2222 }
2223 
2224 /*
2225  * Stage command history for logging.
2226  */
2227 int
2228 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
2229 {
2230 	if (history_str == NULL)
2231 		return (EINVAL);
2232 
2233 	if (strlen(history_str) > HIS_MAX_RECORD_LEN)
2234 		return (EINVAL);
2235 
2236 	if (hdl->libzfs_log_str != NULL)
2237 		free(hdl->libzfs_log_str);
2238 
2239 	if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
2240 		return (no_memory(hdl));
2241 
2242 	return (0);
2243 }
2244 
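/*
 * Sketch showing how the two history helpers above combine, roughly as
 * the zpool command uses them: flatten the command line into one record,
 * then stage it on the handle so the next ioctl logs it.  Hypothetical
 * helper; argc/argv would be main()'s arguments.
 */
static int
example_stage_command(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history_str[HIS_MAX_RECORD_LEN];

	zpool_set_history_str("zpool", argc, argv, history_str);
	return (zpool_stage_history(hdl, history_str));
}
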
2245 /*
2246  * Perform ioctl to get some command history of a pool.
2247  *
2248  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
2249  * logical offset of the history buffer to start reading from.
2250  *
2251  * Upon return, 'off' is the next logical offset to read from and
2252  * 'len' is the actual amount of bytes read into 'buf'.
2253  */
2254 static int
2255 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
2256 {
2257 	zfs_cmd_t zc = { 0 };
2258 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2259 
2260 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2261 
2262 	zc.zc_history = (uint64_t)(uintptr_t)buf;
2263 	zc.zc_history_len = *len;
2264 	zc.zc_history_offset = *off;
2265 
2266 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
2267 		switch (errno) {
2268 		case EPERM:
2269 			return (zfs_error_fmt(hdl, EZFS_PERM,
2270 			    dgettext(TEXT_DOMAIN,
2271 			    "cannot show history for pool '%s'"),
2272 			    zhp->zpool_name));
2273 		case ENOENT:
2274 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
2275 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2276 			    "'%s'"), zhp->zpool_name));
2277 		case ENOTSUP:
2278 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
2279 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
2280 			    "'%s', pool must be upgraded"), zhp->zpool_name));
2281 		default:
2282 			return (zpool_standard_error_fmt(hdl, errno,
2283 			    dgettext(TEXT_DOMAIN,
2284 			    "cannot get history for '%s'"), zhp->zpool_name));
2285 		}
2286 	}
2287 
2288 	*len = zc.zc_history_len;
2289 	*off = zc.zc_history_offset;
2290 
2291 	return (0);
2292 }
2293 
2294 /*
2295  * Process the buffer of nvlists, unpacking and storing each nvlist record
2296  * into 'records'.  'leftover' is set to the number of bytes that weren't
2297  * processed as there wasn't a complete record.
2298  */
2299 static int
2300 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
2301     nvlist_t ***records, uint_t *numrecords)
2302 {
2303 	uint64_t reclen;
2304 	nvlist_t *nv;
2305 	int i;
2306 
2307 	while (bytes_read > sizeof (reclen)) {
2308 
2309 		/* get length of packed record (stored as little endian) */
2310 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
2311 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
2312 
2313 		if (bytes_read < sizeof (reclen) + reclen)
2314 			break;
2315 
2316 		/* unpack record */
2317 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
2318 			return (ENOMEM);
2319 		bytes_read -= sizeof (reclen) + reclen;
2320 		buf += sizeof (reclen) + reclen;
2321 
2322 		/* add record to nvlist array, growing it at powers of two */
2323 		(*numrecords)++;
2324 		if (ISP2(*numrecords + 1)) {
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				(*numrecords)--;	/* slot never stored */
				nvlist_free(nv);
				return (ENOMEM);
			}
			*records = tmp;
2327 		}
2328 		(*records)[*numrecords - 1] = nv;
2329 	}
2330 
2331 	*leftover = bytes_read;
2332 	return (0);
2333 }
2334 
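/*
 * For reference, a sketch of the inverse operation: lay out one record the
 * way the kernel does (an 8-byte little-endian length, then the packed
 * nvlist).  Nothing in the library calls this; it only illustrates the
 * format that zpool_history_unpack() parses.  Hypothetical helper.
 */
static int
example_pack_record(nvlist_t *nv, char **bufp, size_t *lenp)
{
	char *packed = NULL;
	size_t size = 0;
	uint64_t reclen;
	char *buf;
	int i;

	if (nvlist_pack(nv, &packed, &size, NV_ENCODE_NATIVE, 0) != 0)
		return (-1);

	if ((buf = malloc(sizeof (reclen) + size)) == NULL) {
		free(packed);
		return (-1);
	}

	/* store the length one byte at a time, least significant first */
	reclen = size;
	for (i = 0; i < sizeof (reclen); i++)
		buf[i] = (reclen >> (8 * i)) & 0xff;
	bcopy(packed, buf + sizeof (reclen), size);

	free(packed);
	*bufp = buf;
	*lenp = sizeof (reclen) + size;
	return (0);
}
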
2335 #define	HIS_BUF_LEN	(128*1024)
2336 
2337 /*
2338  * Retrieve the command history of a pool.
2339  */
2340 int
2341 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
2342 {
2343 	char buf[HIS_BUF_LEN];
2344 	uint64_t off = 0;
2345 	nvlist_t **records = NULL;
2346 	uint_t numrecords = 0;
2347 	int err, i;
2348 
2349 	do {
2350 		uint64_t bytes_read = sizeof (buf);
2351 		uint64_t leftover;
2352 
2353 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
2354 			break;
2355 
2356 		/* if nothing else was read in, we're at EOF, just return */
2357 		if (!bytes_read)
2358 			break;
2359 
2360 		if ((err = zpool_history_unpack(buf, bytes_read,
2361 		    &leftover, &records, &numrecords)) != 0)
2362 			break;
2363 		off -= leftover;
2364 
2365 		/* CONSTCOND */
2366 	} while (1);
2367 
2368 	if (!err) {
2369 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
2370 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
2371 		    records, numrecords) == 0);
2372 	}
2373 	for (i = 0; i < numrecords; i++)
2374 		nvlist_free(records[i]);
2375 	free(records);
2376 
2377 	return (err);
2378 }
2379 
2380 void
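/*
 * Sketch of a zpool_get_history() consumer, roughly what 'zpool history'
 * does: each record carries (among other fields) ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD.  The helper itself is hypothetical.
 */
static int
example_print_history(zpool_handle_t *zhp)
{
	nvlist_t *nvhis;
	nvlist_t **records;
	uint_t numrecords, i;
	uint64_t tsec;
	char *cmd;

	if (zpool_get_history(zhp, &nvhis) != 0)
		return (-1);

	verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
	    &records, &numrecords) == 0);
	for (i = 0; i < numrecords; i++) {
		if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
		    &tsec) == 0 &&
		    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
		    &cmd) == 0)
			(void) printf("%llu %s\n", (u_longlong_t)tsec, cmd);
	}
	nvlist_free(nvhis);
	return (0);
}
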
2381 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
2382     char *pathname, size_t len)
2383 {
2384 	zfs_cmd_t zc = { 0 };
2385 	boolean_t mounted = B_FALSE;
2386 	char *mntpnt = NULL;
2387 	char dsname[MAXNAMELEN];
2388 
2389 	if (dsobj == 0) {
2390 		/* special case for the MOS */
2391 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
2392 		return;
2393 	}
2394 
2395 	/* get the dataset's name */
2396 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2397 	zc.zc_obj = dsobj;
2398 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
2399 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
2400 		/* just write out a path of two object numbers */
2401 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
2402 		    (u_longlong_t)dsobj, (u_longlong_t)obj);
2403 		return;
2404 	}
2405 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
2406 
2407 	/* find out if the dataset is mounted */
2408 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
2409 
2410 	/* get the corrupted object's path */
2411 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2412 	zc.zc_obj = obj;
2413 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2414 	    &zc) == 0) {
2415 		if (mounted) {
2416 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2417 			    zc.zc_value);
2418 		} else {
2419 			(void) snprintf(pathname, len, "%s:%s",
2420 			    dsname, zc.zc_value);
2421 		}
2422 	} else {
2423 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
2424 	}
2425 	free(mntpnt);
2426 }
2427 
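/*
 * Sketch tying zpool_get_errlog() and zpool_obj_to_path() together, the
 * way 'zpool status -v' resolves persistent errors to file names.  Each
 * pair in the error list holds ZPOOL_ERR_DATASET and ZPOOL_ERR_OBJECT;
 * the pair name itself ("ejk") carries no information.  Hypothetical
 * helper.
 */
static void
example_print_error_paths(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;
	char pathname[MAXPATHLEN * 2];

	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv;
		uint64_t dsobj, obj;

		verify(nvpair_value_nvlist(elem, &nv) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
		    &dsobj) == 0);
		verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
		    &obj) == 0);
		zpool_obj_to_path(zhp, dsobj, obj, pathname,
		    sizeof (pathname));
		(void) printf("%s\n", pathname);
	}
	nvlist_free(nverrlist);
}
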
2428 #define	RDISK_ROOT	"/dev/rdsk"
2429 #define	BACKUP_SLICE	"s2"
2430 /*
2431  * Don't start the slice at the default block of 34; many storage devices
2432  * use a 128K stripe width, so start at block 256 (256 * 512B = 128K).
2433  */
2434 #define	NEW_START_BLOCK	256
2435 
2436 /*
2437  * Determine where a partition starts on a disk in the current
2438  * configuration.
2439  */
2440 static diskaddr_t
2441 find_start_block(nvlist_t *config)
2442 {
2443 	nvlist_t **child;
2444 	uint_t c, children;
2445 	char *path;
2446 	diskaddr_t sb = MAXOFFSET_T;
2447 	int fd;
2448 	char diskname[MAXPATHLEN];
2449 	uint64_t wholedisk;
2450 
2451 	if (nvlist_lookup_nvlist_array(config,
2452 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2453 		if (nvlist_lookup_uint64(config,
2454 		    ZPOOL_CONFIG_WHOLE_DISK,
2455 		    &wholedisk) != 0 || !wholedisk) {
2456 			return (MAXOFFSET_T);
2457 		}
2458 		if (nvlist_lookup_string(config,
2459 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2460 			return (MAXOFFSET_T);
2461 		}
2462 
2463 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2464 		    RDISK_ROOT, strrchr(path, '/'));
2465 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2466 			struct dk_gpt *vtoc;
2467 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2468 				sb = vtoc->efi_parts[0].p_start;
2469 				efi_free(vtoc);
2470 			}
2471 			(void) close(fd);
2472 		}
2473 		return (sb);
2474 	}
2475 
2476 	for (c = 0; c < children; c++) {
2477 		sb = find_start_block(child[c]);
2478 		if (sb != MAXOFFSET_T) {
2479 			return (sb);
2480 		}
2481 	}
2482 	return (MAXOFFSET_T);
2483 }
2484 
2485 /*
2486  * Label an individual disk.  The name provided is the short name,
2487  * stripped of any leading /dev path.
2488  */
2489 int
2490 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2491 {
2492 	char path[MAXPATHLEN];
2493 	struct dk_gpt *vtoc;
2494 	int fd;
2495 	size_t resv = EFI_MIN_RESV_SIZE;
2496 	uint64_t slice_size;
2497 	diskaddr_t start_block;
2498 	char errbuf[1024];

	/* prepare the error prefix used by the failure paths below */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

2500 	if (zhp) {
2501 		nvlist_t *nvroot;
2502 
2503 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2504 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2505 
2506 		if (zhp->zpool_start_block == 0)
2507 			start_block = find_start_block(nvroot);
2508 		else
2509 			start_block = zhp->zpool_start_block;
2510 		zhp->zpool_start_block = start_block;
2511 	} else {
2512 		/* new pool */
2513 		start_block = NEW_START_BLOCK;
2514 	}
2515 
2516 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2517 	    BACKUP_SLICE);
2518 
2519 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2520 		/*
2521 		 * This shouldn't happen.  We've long since verified that this
2522 		 * is a valid device.
2523 		 */
2524 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2525 		    "unable to open device"));
2526 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2527 	}
2528 
2529 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2530 		/*
2531 		 * The only way this can fail is if we run out of memory, or we
2532 		 * were unable to read the disk's capacity
2533 		 */
2534 		if (errno == ENOMEM)
2535 			(void) no_memory(hdl);
2536 
2537 		(void) close(fd);
2538 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2539 		    "unable to read disk capacity"));
2540 
2541 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2542 	}
2543 
2544 	slice_size = vtoc->efi_last_u_lba + 1;
2545 	slice_size -= EFI_MIN_RESV_SIZE;
2546 	if (start_block == MAXOFFSET_T)
2547 		start_block = NEW_START_BLOCK;
2548 	slice_size -= start_block;
2549 
2550 	vtoc->efi_parts[0].p_start = start_block;
2551 	vtoc->efi_parts[0].p_size = slice_size;
2552 
2553 	/*
2554 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2555 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2556 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2557 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2558 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2559 	 * can get, in the absence of V_OTHER.
2560 	 */
2561 	vtoc->efi_parts[0].p_tag = V_USR;
2562 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2563 
2564 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2565 	vtoc->efi_parts[8].p_size = resv;
2566 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2567 
2568 	if (efi_write(fd, vtoc) != 0) {
2569 		/*
2570 		 * Some block drivers (like pcata) may not support EFI
2571 		 * GPT labels.  Print out a helpful error message directing
2572 		 * the user to manually label the disk and provide a
2573 		 * specific slice.
2574 		 */
2575 		(void) close(fd);
2576 		efi_free(vtoc);
2577 
2578 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2579 		    "try using fdisk(1M) and then "
2580 		    "provide a specific slice"));
2581 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2582 	}
2583 
2584 	(void) close(fd);
2585 	efi_free(vtoc);
2586 	return (0);
2587 }
2588
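/*
 * A worked example of the layout arithmetic above, assuming a disk whose
 * last usable LBA is 0x3fffff (about 2GB of 512-byte blocks) and the
 * default NEW_START_BLOCK:
 *
 *	slice_size = (0x3fffff + 1)	total usable blocks
 *	    - EFI_MIN_RESV_SIZE		room for slice 8
 *	    - 256			NEW_START_BLOCK alignment
 *
 * Slice 0 (V_USR, "zfs") covers [256, 256 + slice_size), and slice 8
 * (V_RESERVED) takes the last EFI_MIN_RESV_SIZE blocks of the disk.
 */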