xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision 8654d0253136055bd4cc2423d87378e8a37f2eb5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <alloca.h>
30 #include <assert.h>
31 #include <ctype.h>
32 #include <errno.h>
33 #include <devid.h>
34 #include <dirent.h>
35 #include <fcntl.h>
36 #include <libintl.h>
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <strings.h>
40 #include <unistd.h>
41 #include <sys/efi_partition.h>
42 #include <sys/vtoc.h>
43 #include <sys/zfs_ioctl.h>
44 #include <sys/zio.h>
46 
47 #include "zfs_namecheck.h"
48 #include "zfs_prop.h"
49 #include "libzfs_impl.h"
50 
51 /*
52  * Validate the given pool name, optionally setting an extended error
53  * message on the libzfs handle via zfs_error_aux().
54  */
55 static boolean_t
56 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
57 {
58 	namecheck_err_t why;
59 	char what;
60 	int ret;
61 
62 	ret = pool_namecheck(pool, &why, &what);
63 
64 	/*
65 	 * The rules for reserved pool names were extended at a later point.
66 	 * But we need to support users with existing pools that may now be
67 	 * invalid.  So we only check for this expanded set of names during a
68 	 * create (or import), and only in userland.
69 	 */
70 	if (ret == 0 && !isopen &&
71 	    (strncmp(pool, "mirror", 6) == 0 ||
72 	    strncmp(pool, "raidz", 5) == 0 ||
73 	    strncmp(pool, "spare", 5) == 0 ||
74 	    strcmp(pool, "log") == 0)) {
75 		zfs_error_aux(hdl,
76 		    dgettext(TEXT_DOMAIN, "name is reserved"));
77 		return (B_FALSE);
78 	}
79 
80 
81 	if (ret != 0) {
82 		if (hdl != NULL) {
83 			switch (why) {
84 			case NAME_ERR_TOOLONG:
85 				zfs_error_aux(hdl,
86 				    dgettext(TEXT_DOMAIN, "name is too long"));
87 				break;
88 
89 			case NAME_ERR_INVALCHAR:
90 				zfs_error_aux(hdl,
91 				    dgettext(TEXT_DOMAIN, "invalid character "
92 				    "'%c' in pool name"), what);
93 				break;
94 
95 			case NAME_ERR_NOLETTER:
96 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
97 				    "name must begin with a letter"));
98 				break;
99 
100 			case NAME_ERR_RESERVED:
101 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
102 				    "name is reserved"));
103 				break;
104 
105 			case NAME_ERR_DISKLIKE:
106 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
107 				    "pool name is reserved"));
108 				break;
109 
110 			case NAME_ERR_LEADING_SLASH:
111 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
112 				    "leading slash in name"));
113 				break;
114 
115 			case NAME_ERR_EMPTY_COMPONENT:
116 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
117 				    "empty component in name"));
118 				break;
119 
120 			case NAME_ERR_TRAILING_SLASH:
121 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
122 				    "trailing slash in name"));
123 				break;
124 
125 			case NAME_ERR_MULTIPLE_AT:
126 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
127 				    "multiple '@' delimiters in name"));
128 				break;
129 
130 			}
131 		}
132 		return (B_FALSE);
133 	}
134 
135 	return (B_TRUE);
136 }
137 
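/*
 * Illustrative sketch (not part of the library): because the reserved-name
 * check above applies only when 'isopen' is B_FALSE, a hypothetical name
 * such as "mirrorpool" is rejected when creating or importing a pool, but
 * an existing pool with that name can still be opened:
 *
 *	zpool_name_valid(hdl, B_FALSE, "mirrorpool");	returns B_FALSE
 *	zpool_name_valid(hdl, B_TRUE, "mirrorpool");	returns B_TRUE
 */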
138 static int
139 zpool_get_all_props(zpool_handle_t *zhp)
140 {
141 	zfs_cmd_t zc = { 0 };
142 	libzfs_handle_t *hdl = zhp->zpool_hdl;
143 
144 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
145 
146 	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
147 		return (-1);
148 
149 	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
150 		if (errno == ENOMEM) {
151 			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
152 				zcmd_free_nvlists(&zc);
153 				return (-1);
154 			}
155 		} else {
156 			zcmd_free_nvlists(&zc);
157 			return (-1);
158 		}
159 	}
160 
161 	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
162 		zcmd_free_nvlists(&zc);
163 		return (-1);
164 	}
165 
166 	zcmd_free_nvlists(&zc);
167 
168 	return (0);
169 }
170 
171 /*
172  * Open a handle to the given pool, even if the pool is currently in the FAULTED
173  * state.
174  */
175 zpool_handle_t *
176 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
177 {
178 	zpool_handle_t *zhp;
179 	boolean_t missing;
180 
181 	/*
182 	 * Make sure the pool name is valid.
183 	 */
184 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
185 		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
186 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
187 		    pool);
188 		return (NULL);
189 	}
190 
191 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
192 		return (NULL);
193 
194 	zhp->zpool_hdl = hdl;
195 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
196 
197 	if (zpool_refresh_stats(zhp, &missing) != 0) {
198 		zpool_close(zhp);
199 		return (NULL);
200 	}
201 
202 	if (missing) {
203 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
204 		    "no such pool"));
205 		(void) zfs_error_fmt(hdl, EZFS_NOENT,
206 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
207 		    pool);
208 		zpool_close(zhp);
209 		return (NULL);
210 	}
211 
212 	return (zhp);
213 }
214 
215 /*
216  * Like the above, but silent on error.  Used when iterating over pools (because
217  * the configuration cache may be out of date).
218  */
219 int
220 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
221 {
222 	zpool_handle_t *zhp;
223 	boolean_t missing;
224 
225 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
226 		return (-1);
227 
228 	zhp->zpool_hdl = hdl;
229 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
230 
231 	if (zpool_refresh_stats(zhp, &missing) != 0) {
232 		zpool_close(zhp);
233 		return (-1);
234 	}
235 
236 	if (missing) {
237 		zpool_close(zhp);
238 		*ret = NULL;
239 		return (0);
240 	}
241 
242 	*ret = zhp;
243 	return (0);
244 }
245 
246 /*
247  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
248  * state.
249  */
250 zpool_handle_t *
251 zpool_open(libzfs_handle_t *hdl, const char *pool)
252 {
253 	zpool_handle_t *zhp;
254 
255 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
256 		return (NULL);
257 
258 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
259 		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
260 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
261 		zpool_close(zhp);
262 		return (NULL);
263 	}
264 
265 	return (zhp);
266 }
267 
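/*
 * Illustrative sketch (not part of the library): a minimal consumer that
 * opens a pool and reads a few of the attributes returned by the accessors
 * below.  The pool name "tank" is an assumption and error reporting is
 * abbreviated.
 *
 *	libzfs_handle_t *hdl;
 *	zpool_handle_t *zhp;
 *
 *	if ((hdl = libzfs_init()) == NULL)
 *		return (1);
 *	if ((zhp = zpool_open(hdl, "tank")) != NULL) {
 *		(void) printf("%s: version %llu\n", zpool_get_name(zhp),
 *		    (u_longlong_t)zpool_get_version(zhp));
 *		zpool_close(zhp);
 *	}
 *	libzfs_fini(hdl);
 */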
268 /*
269  * Close the handle.  Simply frees the memory associated with the handle.
270  */
271 void
272 zpool_close(zpool_handle_t *zhp)
273 {
274 	if (zhp->zpool_config)
275 		nvlist_free(zhp->zpool_config);
276 	if (zhp->zpool_old_config)
277 		nvlist_free(zhp->zpool_old_config);
278 	if (zhp->zpool_props)
279 		nvlist_free(zhp->zpool_props);
280 	free(zhp);
281 }
282 
283 /*
284  * Return the name of the pool.
285  */
286 const char *
287 zpool_get_name(zpool_handle_t *zhp)
288 {
289 	return (zhp->zpool_name);
290 }
291 
292 /*
293  * Return the GUID of the pool.
294  */
295 uint64_t
296 zpool_get_guid(zpool_handle_t *zhp)
297 {
298 	uint64_t guid;
299 
300 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
301 	    &guid) == 0);
302 	return (guid);
303 }
304 
305 /*
306  * Return the version of the pool.
307  */
308 uint64_t
309 zpool_get_version(zpool_handle_t *zhp)
310 {
311 	uint64_t version;
312 
313 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
314 	    &version) == 0);
315 
316 	return (version);
317 }
318 
319 /*
320  * Return the amount of space currently consumed by the pool.
321  */
322 uint64_t
323 zpool_get_space_used(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_alloc);
335 }
336 
337 /*
338  * Return the total space in the pool.
339  */
340 uint64_t
341 zpool_get_space_total(zpool_handle_t *zhp)
342 {
343 	nvlist_t *nvroot;
344 	vdev_stat_t *vs;
345 	uint_t vsc;
346 
347 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
348 	    &nvroot) == 0);
349 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
350 	    (uint64_t **)&vs, &vsc) == 0);
351 
352 	return (vs->vs_space);
353 }
354 
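/*
 * Illustrative sketch (not part of the library): computing the capacity
 * percentage from the two accessors above, roughly the way zpool(1M)
 * reports it.  'zhp' is assumed to be an open handle to a healthy pool.
 *
 *	uint64_t used = zpool_get_space_used(zhp);
 *	uint64_t total = zpool_get_space_total(zhp);
 *
 *	(void) printf("%llu%% used\n", (u_longlong_t)((used * 100) / total));
 */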
355 /*
356  * Return the alternate root for this pool, if any.
357  */
358 int
359 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
360 {
361 	zfs_cmd_t zc = { 0 };
362 
363 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
364 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
365 	    zc.zc_value[0] == '\0')
366 		return (-1);
367 
368 	(void) strlcpy(buf, zc.zc_value, buflen);
369 
370 	return (0);
371 }
372 
373 /*
374  * Return the state of the pool (ACTIVE or UNAVAILABLE).
375  */
376 int
377 zpool_get_state(zpool_handle_t *zhp)
378 {
379 	return (zhp->zpool_state);
380 }
381 
382 /*
383  * Create the named pool, using the provided vdev list.  It is assumed
384  * that the consumer has already validated the contents of the nvlist, so we
385  * don't have to worry about error semantics.
386  */
387 int
388 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
389     const char *altroot)
390 {
391 	zfs_cmd_t zc = { 0 };
392 	char msg[1024];
393 
394 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
395 	    "cannot create '%s'"), pool);
396 
397 	if (!zpool_name_valid(hdl, B_FALSE, pool))
398 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
399 
400 	if (altroot != NULL && altroot[0] != '/')
401 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
402 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
403 
404 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
405 		return (-1);
406 
407 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
408 
409 	if (altroot != NULL)
410 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
411 
412 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
413 		zcmd_free_nvlists(&zc);
414 
415 		switch (errno) {
416 		case EBUSY:
417 			/*
418 			 * This can happen if the user has specified the same
419 			 * device multiple times.  We can't reliably detect this
420 			 * until we try to add it and see we already have a
421 			 * label.
422 			 */
423 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
424 			    "one or more vdevs refer to the same device"));
425 			return (zfs_error(hdl, EZFS_BADDEV, msg));
426 
427 		case EOVERFLOW:
428 			/*
429 			 * This occurs when one of the devices is below
430 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
431 			 * device was the problem device since there's no
432 			 * reliable way to determine device size from userland.
433 			 */
434 			{
435 				char buf[64];
436 
437 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
438 
439 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
440 				    "one or more devices is less than the "
441 				    "minimum size (%s)"), buf);
442 			}
443 			return (zfs_error(hdl, EZFS_BADDEV, msg));
444 
445 		case ENOSPC:
446 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
447 			    "one or more devices is out of space"));
448 			return (zfs_error(hdl, EZFS_BADDEV, msg));
449 
450 		default:
451 			return (zpool_standard_error(hdl, errno, msg));
452 		}
453 	}
454 
455 	zcmd_free_nvlists(&zc);
456 
457 	/*
458 	 * If this is an alternate root pool, then we automatically set the
459 	 * mountpoint of the root dataset to be '/'.
460 	 */
461 	if (altroot != NULL) {
462 		zfs_handle_t *zhp;
463 
464 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
465 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
466 		    "/") == 0);
467 
468 		zfs_close(zhp);
469 	}
470 
471 	return (0);
472 }
473 
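/*
 * Illustrative sketch (not part of the library): hand-building a minimal
 * single-disk vdev specification for zpool_create().  zpool(1M) normally
 * constructs this nvlist with its own helpers; the pool name "tank", the
 * device path, and the open handle 'hdl' are assumptions, and error
 * checking is abbreviated.
 *
 *	nvlist_t *nvroot, *disk;
 *
 *	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/dsk/c1t0d0s0") == 0);
 *	verify(nvlist_add_uint64(disk, ZPOOL_CONFIG_WHOLE_DISK, 0ULL) == 0);
 *
 *	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
 *	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL) != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 *
 *	nvlist_free(disk);
 *	nvlist_free(nvroot);
 */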
474 /*
475  * Destroy the given pool.  It is up to the caller to ensure that there are no
476  * datasets left in the pool.
477  */
478 int
479 zpool_destroy(zpool_handle_t *zhp)
480 {
481 	zfs_cmd_t zc = { 0 };
482 	zfs_handle_t *zfp = NULL;
483 	libzfs_handle_t *hdl = zhp->zpool_hdl;
484 	char msg[1024];
485 
486 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
487 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
488 	    ZFS_TYPE_FILESYSTEM)) == NULL)
489 		return (-1);
490 
491 	if (zpool_remove_zvol_links(zhp) != 0)
492 		return (-1);
493 
494 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
495 
496 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
497 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
498 		    "cannot destroy '%s'"), zhp->zpool_name);
499 
500 		if (errno == EROFS) {
501 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
502 			    "one or more devices is read only"));
503 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
504 		} else {
505 			(void) zpool_standard_error(hdl, errno, msg);
506 		}
507 
508 		if (zfp)
509 			zfs_close(zfp);
510 		return (-1);
511 	}
512 
513 	if (zfp) {
514 		remove_mountpoint(zfp);
515 		zfs_close(zfp);
516 	}
517 
518 	return (0);
519 }
520 
521 /*
522  * Add the given vdevs to the pool.  The caller must have already performed the
523  * necessary verification to ensure that the vdev specification is well-formed.
524  */
525 int
526 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
527 {
528 	zfs_cmd_t zc = { 0 };
529 	int ret;
530 	libzfs_handle_t *hdl = zhp->zpool_hdl;
531 	char msg[1024];
532 	nvlist_t **spares;
533 	uint_t nspares;
534 
535 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
536 	    "cannot add to '%s'"), zhp->zpool_name);
537 
538 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
539 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
540 	    &spares, &nspares) == 0) {
541 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
542 		    "upgraded to add hot spares"));
543 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
544 	}
545 
546 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
547 		return (-1);
548 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
549 
550 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
551 		switch (errno) {
552 		case EBUSY:
553 			/*
554 			 * This can happen if the user has specified the same
555 			 * device multiple times.  We can't reliably detect this
556 			 * until we try to add it and see we already have a
557 			 * label.
558 			 */
559 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
560 			    "one or more vdevs refer to the same device"));
561 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
562 			break;
563 
564 		case EOVERFLOW:
565 			/*
566 			 * This occurs when one of the devices is below
567 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
568 			 * device was the problem device since there's no
569 			 * reliable way to determine device size from userland.
570 			 */
571 			{
572 				char buf[64];
573 
574 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
575 
576 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
577 				    "device is less than the minimum "
578 				    "size (%s)"), buf);
579 			}
580 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
581 			break;
582 
583 		case ENOTSUP:
584 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
585 			    "pool must be upgraded to add these vdevs"));
586 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
587 			break;
588 
589 		case EDOM:
590 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
591 			    "root pool can not have multiple vdevs"
592 			    " or separate logs"));
593 			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
594 			break;
595 
596 		default:
597 			(void) zpool_standard_error(hdl, errno, msg);
598 		}
599 
600 		ret = -1;
601 	} else {
602 		ret = 0;
603 	}
604 
605 	zcmd_free_nvlists(&zc);
606 
607 	return (ret);
608 }
609 
610 /*
611  * Exports the pool from the system.  The caller must ensure that there are no
612  * mounted datasets in the pool.
613  */
614 int
615 zpool_export(zpool_handle_t *zhp)
616 {
617 	zfs_cmd_t zc = { 0 };
618 
619 	if (zpool_remove_zvol_links(zhp) != 0)
620 		return (-1);
621 
622 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
623 
624 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
625 		return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
626 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
627 		    zhp->zpool_name));
628 	return (0);
629 }
630 
631 /*
632  * Import the given pool using the known configuration.  The configuration
633  * should have come from zpool_find_import().  The 'newname' and 'altroot'
634  * parameters control whether the pool is imported with a different name or with
635  * an alternate root, respectively.
636  */
637 int
638 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
639     const char *altroot)
640 {
641 	zfs_cmd_t zc = { 0 };
642 	char *thename;
643 	char *origname;
644 	int ret;
645 
646 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
647 	    &origname) == 0);
648 
649 	if (newname != NULL) {
650 		if (!zpool_name_valid(hdl, B_FALSE, newname))
651 			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
652 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
653 			    newname));
654 		thename = (char *)newname;
655 	} else {
656 		thename = origname;
657 	}
658 
659 	if (altroot != NULL && altroot[0] != '/')
660 		return (zfs_error_fmt(hdl, EZFS_BADPATH,
661 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
662 		    altroot));
663 
664 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
665 
666 	if (altroot != NULL)
667 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
668 	else
669 		zc.zc_value[0] = '\0';
670 
671 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
672 	    &zc.zc_guid) == 0);
673 
674 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
675 		return (-1);
676 
677 	ret = 0;
678 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
679 		char desc[1024];
680 		if (newname == NULL)
681 			(void) snprintf(desc, sizeof (desc),
682 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
683 			    thename);
684 		else
685 			(void) snprintf(desc, sizeof (desc),
686 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
687 			    origname, thename);
688 
689 		switch (errno) {
690 		case ENOTSUP:
691 			/*
692 			 * Unsupported version.
693 			 */
694 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
695 			break;
696 
697 		case EINVAL:
698 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
699 			break;
700 
701 		default:
702 			(void) zpool_standard_error(hdl, errno, desc);
703 		}
704 
705 		ret = -1;
706 	} else {
707 		zpool_handle_t *zhp;
708 		/*
709 		 * This should never fail, but play it safe anyway.
710 		 */
711 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
712 			ret = -1;
713 		} else if (zhp != NULL) {
714 			ret = zpool_create_zvol_links(zhp);
715 			zpool_close(zhp);
716 		}
717 	}
718 
719 	zcmd_free_nvlists(&zc);
720 	return (ret);
721 }
722 
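/*
 * Illustrative sketch (not part of the library): importing every exported
 * pool that can be discovered, under its original name and with no
 * alternate root.  zpool_find_import() is assumed to take the handle plus
 * an argc/argv list of directories to search (NULL meaning the default
 * /dev/dsk), returning an nvlist of name/config pairs.
 *
 *	nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *	nvpair_t *elem = NULL;
 *
 *	while (pools != NULL &&
 *	    (elem = nvlist_next_nvpair(pools, elem)) != NULL) {
 *		nvlist_t *config;
 *
 *		verify(nvpair_value_nvlist(elem, &config) == 0);
 *		(void) zpool_import(hdl, config, NULL, NULL);
 *	}
 *	nvlist_free(pools);
 */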
723 /*
724  * Scrub the pool.
725  */
726 int
727 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
728 {
729 	zfs_cmd_t zc = { 0 };
730 	char msg[1024];
731 	libzfs_handle_t *hdl = zhp->zpool_hdl;
732 
733 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
734 	zc.zc_cookie = type;
735 
736 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
737 		return (0);
738 
739 	(void) snprintf(msg, sizeof (msg),
740 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
741 
742 	if (errno == EBUSY)
743 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
744 	else
745 		return (zpool_standard_error(hdl, errno, msg));
746 }
747 
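/*
 * Illustrative sketch (not part of the library): starting and stopping a
 * scrub on an open pool handle, as 'zpool scrub' and 'zpool scrub -s' do.
 *
 *	(void) zpool_scrub(zhp, POOL_SCRUB_EVERYTHING);	start a scrub
 *	(void) zpool_scrub(zhp, POOL_SCRUB_NONE);	stop any scrub
 */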
748 /*
749  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
750  * spare, or FALSE if it's an INUSE spare.
751  */
752 static nvlist_t *
753 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
754     boolean_t *avail_spare)
755 {
756 	uint_t c, children;
757 	nvlist_t **child;
758 	uint64_t theguid, present;
759 	char *path;
760 	uint64_t wholedisk = 0;
761 	nvlist_t *ret;
762 
763 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
764 
765 	if (search == NULL &&
766 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
767 		/*
768 		 * If the device has never been present since import, the only
769 		 * reliable way to match the vdev is by GUID.
770 		 */
771 		if (theguid == guid)
772 			return (nv);
773 	} else if (search != NULL &&
774 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
775 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
776 		    &wholedisk);
777 		if (wholedisk) {
778 			/*
779 			 * For whole disks, the internal path has 's0', but the
780 			 * path passed in by the user doesn't.
781 			 */
782 			if (strlen(search) == strlen(path) - 2 &&
783 			    strncmp(search, path, strlen(search)) == 0)
784 				return (nv);
785 		} else if (strcmp(search, path) == 0) {
786 			return (nv);
787 		}
788 	}
789 
790 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
791 	    &child, &children) != 0)
792 		return (NULL);
793 
794 	for (c = 0; c < children; c++)
795 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
796 		    avail_spare)) != NULL)
797 			return (ret);
798 
799 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
800 	    &child, &children) == 0) {
801 		for (c = 0; c < children; c++) {
802 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
803 			    avail_spare)) != NULL) {
804 				*avail_spare = B_TRUE;
805 				return (ret);
806 			}
807 		}
808 	}
809 
810 	return (NULL);
811 }
812 
813 nvlist_t *
814 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
815 {
816 	char buf[MAXPATHLEN];
817 	const char *search;
818 	char *end;
819 	nvlist_t *nvroot;
820 	uint64_t guid;
821 
822 	guid = strtoull(path, &end, 10);
823 	if (guid != 0 && *end == '\0') {
824 		search = NULL;
825 	} else if (path[0] != '/') {
826 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
827 		search = buf;
828 	} else {
829 		search = path;
830 	}
831 
832 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
833 	    &nvroot) == 0);
834 
835 	*avail_spare = B_FALSE;
836 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
837 }
838 
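/*
 * Illustrative sketch (not part of the library): looking up a vdev either
 * by a short device name (expanded to /dev/dsk/ above) or by its GUID in
 * decimal form.  The device name and GUID are assumptions.
 *
 *	boolean_t spare;
 *	nvlist_t *tgt;
 *
 *	tgt = zpool_find_vdev(zhp, "c1t0d0", &spare);
 *	tgt = zpool_find_vdev(zhp, "9271148052803872894", &spare);
 */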
839 /*
840  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
841  */
842 static boolean_t
843 is_spare(zpool_handle_t *zhp, uint64_t guid)
844 {
845 	uint64_t spare_guid;
846 	nvlist_t *nvroot;
847 	nvlist_t **spares;
848 	uint_t nspares;
849 	int i;
850 
851 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
852 	    &nvroot) == 0);
853 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
854 	    &spares, &nspares) == 0) {
855 		for (i = 0; i < nspares; i++) {
856 			verify(nvlist_lookup_uint64(spares[i],
857 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
858 			if (guid == spare_guid)
859 				return (B_TRUE);
860 		}
861 	}
862 
863 	return (B_FALSE);
864 }
865 
866 /*
867  * Bring the specified vdev online.   The 'flags' parameter is a set of the
868  * ZFS_ONLINE_* flags.
869  */
870 int
871 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
872     vdev_state_t *newstate)
873 {
874 	zfs_cmd_t zc = { 0 };
875 	char msg[1024];
876 	nvlist_t *tgt;
877 	boolean_t avail_spare;
878 	libzfs_handle_t *hdl = zhp->zpool_hdl;
879 
880 	(void) snprintf(msg, sizeof (msg),
881 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
882 
883 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
884 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
885 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
886 
887 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
888 
889 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
890 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
891 
892 	zc.zc_cookie = VDEV_STATE_ONLINE;
893 	zc.zc_obj = flags;
894 
895 
896 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) != 0)
897 		return (zpool_standard_error(hdl, errno, msg));
898 
899 	*newstate = zc.zc_cookie;
900 	return (0);
901 }
902 
903 /*
904  * Take the specified vdev offline.
905  */
906 int
907 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
908 {
909 	zfs_cmd_t zc = { 0 };
910 	char msg[1024];
911 	nvlist_t *tgt;
912 	boolean_t avail_spare;
913 	libzfs_handle_t *hdl = zhp->zpool_hdl;
914 
915 	(void) snprintf(msg, sizeof (msg),
916 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
917 
918 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
919 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
920 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
921 
922 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
923 
924 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
925 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
926 
927 	zc.zc_cookie = VDEV_STATE_OFFLINE;
928 	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
929 
930 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
931 		return (0);
932 
933 	switch (errno) {
934 	case EBUSY:
935 
936 		/*
937 		 * There are no other replicas of this device.
938 		 */
939 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
940 
941 	default:
942 		return (zpool_standard_error(hdl, errno, msg));
943 	}
944 }
945 
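/*
 * Illustrative sketch (not part of the library): temporarily offlining a
 * device and bringing it back online, as 'zpool offline -t' and
 * 'zpool online' would.  The device name is an assumption and errors are
 * ignored.
 *
 *	vdev_state_t newstate;
 *
 *	(void) zpool_vdev_offline(zhp, "c1t0d0", B_TRUE);
 *	(void) zpool_vdev_online(zhp, "c1t0d0", 0, &newstate);
 *	if (newstate == VDEV_STATE_HEALTHY)
 *		(void) printf("device is healthy again\n");
 */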
946 /*
947  * Mark the given vdev faulted.
948  */
949 int
950 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid)
951 {
952 	zfs_cmd_t zc = { 0 };
953 	char msg[1024];
954 	libzfs_handle_t *hdl = zhp->zpool_hdl;
955 
956 	(void) snprintf(msg, sizeof (msg),
957 	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
958 
959 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
960 	zc.zc_guid = guid;
961 	zc.zc_cookie = VDEV_STATE_FAULTED;
962 
963 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
964 		return (0);
965 
966 	switch (errno) {
967 	case EBUSY:
968 
969 		/*
970 		 * There are no other replicas of this device.
971 		 */
972 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
973 
974 	default:
975 		return (zpool_standard_error(hdl, errno, msg));
976 	}
977 
978 }
979 
980 /*
981  * Mark the given vdev degraded.
982  */
983 int
984 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid)
985 {
986 	zfs_cmd_t zc = { 0 };
987 	char msg[1024];
988 	libzfs_handle_t *hdl = zhp->zpool_hdl;
989 
990 	(void) snprintf(msg, sizeof (msg),
991 	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
992 
993 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
994 	zc.zc_guid = guid;
995 	zc.zc_cookie = VDEV_STATE_DEGRADED;
996 
997 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
998 		return (0);
999 
1000 	return (zpool_standard_error(hdl, errno, msg));
1001 }
1002 
1003 /*
1004  * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
1005  * a hot spare.
1006  */
1007 static boolean_t
1008 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
1009 {
1010 	nvlist_t **child;
1011 	uint_t c, children;
1012 	char *type;
1013 
1014 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
1015 	    &children) == 0) {
1016 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
1017 		    &type) == 0);
1018 
1019 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
1020 		    children == 2 && child[which] == tgt)
1021 			return (B_TRUE);
1022 
1023 		for (c = 0; c < children; c++)
1024 			if (is_replacing_spare(child[c], tgt, which))
1025 				return (B_TRUE);
1026 	}
1027 
1028 	return (B_FALSE);
1029 }
1030 
1031 /*
1032  * Attach new_disk (fully described by nvroot) to old_disk.
1033  * If 'replacing' is specified, the new disk will replace the old one.
1034  */
1035 int
1036 zpool_vdev_attach(zpool_handle_t *zhp,
1037     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
1038 {
1039 	zfs_cmd_t zc = { 0 };
1040 	char msg[1024];
1041 	int ret;
1042 	nvlist_t *tgt;
1043 	boolean_t avail_spare;
1044 	uint64_t val, is_log;
1045 	char *path;
1046 	nvlist_t **child;
1047 	uint_t children;
1048 	nvlist_t *config_root;
1049 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1050 
1051 	if (replacing)
1052 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1053 		    "cannot replace %s with %s"), old_disk, new_disk);
1054 	else
1055 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1056 		    "cannot attach %s to %s"), new_disk, old_disk);
1057 
1058 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1059 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == 0)
1060 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1061 
1062 	if (avail_spare)
1063 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1064 
1065 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1066 	zc.zc_cookie = replacing;
1067 
1068 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
1069 	    &child, &children) != 0 || children != 1) {
1070 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1071 		    "new device must be a single disk"));
1072 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
1073 	}
1074 
1075 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
1076 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
1077 
1078 	/*
1079 	 * If the target is a hot spare that has been swapped in, we can only
1080 	 * replace it with another hot spare.
1081 	 */
1082 	if (replacing &&
1083 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
1084 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1085 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
1086 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
1087 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1088 		    "can only be replaced by another hot spare"));
1089 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1090 	}
1091 
1092 	/*
1093 	 * If we are attempting to replace a spare, it cannot be applied to an
1094 	 * already spared device.
1095 	 */
1096 	if (replacing &&
1097 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1098 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1099 	    is_replacing_spare(config_root, tgt, 0)) {
1100 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1101 		    "device has already been replaced with a spare"));
1102 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1103 	}
1104 
1105 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1106 		return (-1);
1107 
1108 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1109 
1110 	zcmd_free_nvlists(&zc);
1111 
1112 	if (ret == 0)
1113 		return (0);
1114 
1115 	switch (errno) {
1116 	case ENOTSUP:
1117 		/*
1118 		 * Can't attach to or replace this type of vdev.
1119 		 */
1120 		if (replacing) {
1121 			is_log = B_FALSE;
1122 			(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_LOG,
1123 			    &is_log);
1124 			if (is_log)
1125 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1126 				    "cannot replace a log with a spare"));
1127 			else
1128 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1129 				    "cannot replace a replacing device"));
1130 		} else {
1131 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1132 			    "can only attach to mirrors and top-level "
1133 			    "disks"));
1134 		}
1135 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1136 		break;
1137 
1138 	case EINVAL:
1139 		/*
1140 		 * The new device must be a single disk.
1141 		 */
1142 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1143 		    "new device must be a single disk"));
1144 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1145 		break;
1146 
1147 	case EBUSY:
1148 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1149 		    new_disk);
1150 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1151 		break;
1152 
1153 	case EOVERFLOW:
1154 		/*
1155 		 * The new device is too small.
1156 		 */
1157 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1158 		    "device is too small"));
1159 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1160 		break;
1161 
1162 	case EDOM:
1163 		/*
1164 		 * The new device has a different alignment requirement.
1165 		 */
1166 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1167 		    "devices have different sector alignment"));
1168 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1169 		break;
1170 
1171 	case ENAMETOOLONG:
1172 		/*
1173 		 * The resulting top-level vdev spec won't fit in the label.
1174 		 */
1175 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1176 		break;
1177 
1178 	default:
1179 		(void) zpool_standard_error(hdl, errno, msg);
1180 	}
1181 
1182 	return (-1);
1183 }
1184 
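/*
 * Illustrative sketch (not part of the library): replacing one disk with
 * another, the way 'zpool replace' does.  The nvroot describing the new
 * disk is built like the single-disk child in the zpool_create() sketch
 * above; both device names are assumptions.  Passing 0 instead of 1 for
 * the final argument attaches the new disk as a mirror instead.
 *
 *	nvlist_t *nvroot;	root vdev with one disk child for c2t0d0
 *
 *	if (zpool_vdev_attach(zhp, "c1t0d0", "c2t0d0", nvroot, 1) != 0)
 *		(void) fprintf(stderr, "replace failed\n");
 */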
1185 /*
1186  * Detach the specified device.
1187  */
1188 int
1189 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1190 {
1191 	zfs_cmd_t zc = { 0 };
1192 	char msg[1024];
1193 	nvlist_t *tgt;
1194 	boolean_t avail_spare;
1195 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1196 
1197 	(void) snprintf(msg, sizeof (msg),
1198 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1199 
1200 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1201 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1202 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1203 
1204 	if (avail_spare)
1205 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1206 
1207 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1208 
1209 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1210 		return (0);
1211 
1212 	switch (errno) {
1213 
1214 	case ENOTSUP:
1215 		/*
1216 		 * Can't detach from this type of vdev.
1217 		 */
1218 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1219 		    "applicable to mirror and replacing vdevs"));
1220 		(void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg);
1221 		break;
1222 
1223 	case EBUSY:
1224 		/*
1225 		 * There are no other replicas of this device.
1226 		 */
1227 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1228 		break;
1229 
1230 	default:
1231 		(void) zpool_standard_error(hdl, errno, msg);
1232 	}
1233 
1234 	return (-1);
1235 }
1236 
1237 /*
1238  * Remove the given device.  Currently, this is supported only for hot spares.
1239  */
1240 int
1241 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1242 {
1243 	zfs_cmd_t zc = { 0 };
1244 	char msg[1024];
1245 	nvlist_t *tgt;
1246 	boolean_t avail_spare;
1247 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1248 
1249 	(void) snprintf(msg, sizeof (msg),
1250 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1251 
1252 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1253 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1254 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1255 
1256 	if (!avail_spare) {
1257 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1258 		    "only inactive hot spares can be removed"));
1259 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1260 	}
1261 
1262 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1263 
1264 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1265 		return (0);
1266 
1267 	return (zpool_standard_error(hdl, errno, msg));
1268 }
1269 
1270 /*
1271  * Clear the errors for the pool, or the particular device if specified.
1272  */
1273 int
1274 zpool_clear(zpool_handle_t *zhp, const char *path)
1275 {
1276 	zfs_cmd_t zc = { 0 };
1277 	char msg[1024];
1278 	nvlist_t *tgt;
1279 	boolean_t avail_spare;
1280 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1281 
1282 	if (path)
1283 		(void) snprintf(msg, sizeof (msg),
1284 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1285 		    path);
1286 	else
1287 		(void) snprintf(msg, sizeof (msg),
1288 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1289 		    zhp->zpool_name);
1290 
1291 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1292 	if (path) {
1293 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == 0)
1294 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1295 
1296 		if (avail_spare)
1297 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1298 
1299 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1300 		    &zc.zc_guid) == 0);
1301 	}
1302 
1303 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1304 		return (0);
1305 
1306 	return (zpool_standard_error(hdl, errno, msg));
1307 }
1308 
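/*
 * Illustrative sketch (not part of the library): clearing error counts for
 * the whole pool or for a single device, as 'zpool clear' does.  The
 * device name is an assumption.
 *
 *	(void) zpool_clear(zhp, NULL);		entire pool
 *	(void) zpool_clear(zhp, "c1t0d0");	one device only
 */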
1309 /*
1310  * Similar to zpool_clear(), but takes a GUID (used by fmd).
1311  */
1312 int
1313 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
1314 {
1315 	zfs_cmd_t zc = { 0 };
1316 	char msg[1024];
1317 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1318 
1319 	(void) snprintf(msg, sizeof (msg),
1320 	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
1321 	    guid);
1322 
1323 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1324 	zc.zc_guid = guid;
1325 
1326 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1327 		return (0);
1328 
1329 	return (zpool_standard_error(hdl, errno, msg));
1330 }
1331 
1332 /*
1333  * Iterate over all zvols in a given pool by walking the /dev/zvol/dsk/<pool>
1334  * hierarchy.
1335  */
1336 int
1337 zpool_iter_zvol(zpool_handle_t *zhp, int (*cb)(const char *, void *),
1338     void *data)
1339 {
1340 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1341 	char (*paths)[MAXPATHLEN];
1342 	size_t size = 4;
1343 	int curr, fd, base, ret = 0;
1344 	DIR *dirp;
1345 	struct dirent *dp;
1346 	struct stat st;
1347 
1348 	if ((base = open("/dev/zvol/dsk", O_RDONLY)) < 0)
1349 		return (errno == ENOENT ? 0 : -1);
1350 
1351 	if (fstatat(base, zhp->zpool_name, &st, 0) != 0) {
1352 		int err = errno;
1353 		(void) close(base);
1354 		return (err == ENOENT ? 0 : -1);
1355 	}
1356 
1357 	/*
1358 	 * Oddly this wasn't a directory -- ignore that failure since we
1359 	 * know there are no links lower in the (nonexistent) hierarchy.
1360 	 */
1361 	if (!S_ISDIR(st.st_mode)) {
1362 		(void) close(base);
1363 		return (0);
1364 	}
1365 
1366 	if ((paths = zfs_alloc(hdl, size * sizeof (paths[0]))) == NULL) {
1367 		(void) close(base);
1368 		return (-1);
1369 	}
1370 
1371 	(void) strlcpy(paths[0], zhp->zpool_name, sizeof (paths[0]));
1372 	curr = 0;
1373 
1374 	while (curr >= 0) {
1375 		if (fstatat(base, paths[curr], &st, AT_SYMLINK_NOFOLLOW) != 0)
1376 			goto err;
1377 
1378 		if (S_ISDIR(st.st_mode)) {
1379 			if ((fd = openat(base, paths[curr], O_RDONLY)) < 0)
1380 				goto err;
1381 
1382 			if ((dirp = fdopendir(fd)) == NULL) {
1383 				(void) close(fd);
1384 				goto err;
1385 			}
1386 
1387 			while ((dp = readdir(dirp)) != NULL) {
1388 				if (dp->d_name[0] == '.')
1389 					continue;
1390 
1391 				if (curr + 1 == size) {
1392 					paths = zfs_realloc(hdl, paths,
1393 					    size * sizeof (paths[0]),
1394 					    size * 2 * sizeof (paths[0]));
1395 					if (paths == NULL) {
1396 						(void) closedir(dirp);
1397 						(void) close(fd);
1398 						goto err;
1399 					}
1400 
1401 					size *= 2;
1402 				}
1403 
1404 				(void) strlcpy(paths[curr + 1], paths[curr],
1405 				    sizeof (paths[curr + 1]));
1406 				(void) strlcat(paths[curr], "/",
1407 				    sizeof (paths[curr]));
1408 				(void) strlcat(paths[curr], dp->d_name,
1409 				    sizeof (paths[curr]));
1410 				curr++;
1411 			}
1412 
1413 			(void) closedir(dirp);
1414 
1415 		} else {
1416 			if ((ret = cb(paths[curr], data)) != 0)
1417 				break;
1418 		}
1419 
1420 		curr--;
1421 	}
1422 
1423 	free(paths);
1424 	(void) close(base);
1425 
1426 	return (ret);
1427 
1428 err:
1429 	free(paths);
1430 	(void) close(base);
1431 	return (-1);
1432 }
1433 
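/*
 * Illustrative sketch (not part of the library): a callback that simply
 * prints each zvol name handed to it by zpool_iter_zvol().
 *
 *	static int
 *	print_zvol(const char *dataset, void *data)
 *	{
 *		(void) printf("found zvol %s\n", dataset);
 *		return (0);	returning nonzero stops the walk
 *	}
 *
 *	...
 *	ret = zpool_iter_zvol(zhp, print_zvol, NULL);
 */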
1434 typedef struct zvol_cb {
1435 	zpool_handle_t *zcb_pool;
1436 	boolean_t zcb_create;
1437 } zvol_cb_t;
1438 
1439 /*ARGSUSED*/
1440 static int
1441 do_zvol_create(zfs_handle_t *zhp, void *data)
1442 {
1443 	int ret;
1444 
1445 	if (ZFS_IS_VOLUME(zhp))
1446 		(void) zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1447 
1448 	ret = zfs_iter_children(zhp, do_zvol_create, NULL);
1449 
1450 	zfs_close(zhp);
1451 
1452 	return (ret);
1453 }
1454 
1455 /*
1456  * Iterate over all zvols in the pool and make any necessary minor nodes.
1457  */
1458 int
1459 zpool_create_zvol_links(zpool_handle_t *zhp)
1460 {
1461 	zfs_handle_t *zfp;
1462 	int ret;
1463 
1464 	/*
1465 	 * If the pool is unavailable, just return success.
1466 	 */
1467 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1468 	    zhp->zpool_name)) == NULL)
1469 		return (0);
1470 
1471 	ret = zfs_iter_children(zfp, do_zvol_create, NULL);
1472 
1473 	zfs_close(zfp);
1474 	return (ret);
1475 }
1476 
1477 static int
1478 do_zvol_remove(const char *dataset, void *data)
1479 {
1480 	zpool_handle_t *zhp = data;
1481 
1482 	return (zvol_remove_link(zhp->zpool_hdl, dataset));
1483 }
1484 
1485 /*
1486  * Iterate over all zvols in the pool and remove any minor nodes.  We iterate
1487  * by examining the /dev links so that a corrupted pool doesn't impede this
1488  * operation.
1489  */
1490 int
1491 zpool_remove_zvol_links(zpool_handle_t *zhp)
1492 {
1493 	return (zpool_iter_zvol(zhp, do_zvol_remove, zhp));
1494 }
1495 
1496 /*
1497  * Convert from a devid string to a path.
1498  */
1499 static char *
1500 devid_to_path(char *devid_str)
1501 {
1502 	ddi_devid_t devid;
1503 	char *minor;
1504 	char *path;
1505 	devid_nmlist_t *list = NULL;
1506 	int ret;
1507 
1508 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1509 		return (NULL);
1510 
1511 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1512 
1513 	devid_str_free(minor);
1514 	devid_free(devid);
1515 
1516 	if (ret != 0)
1517 		return (NULL);
1518 
1519 	path = strdup(list[0].devname);
1521 
1522 	devid_free_nmlist(list);
1523 
1524 	return (path);
1525 }
1526 
1527 /*
1528  * Convert from a path to a devid string.
1529  */
1530 static char *
1531 path_to_devid(const char *path)
1532 {
1533 	int fd;
1534 	ddi_devid_t devid;
1535 	char *minor, *ret;
1536 
1537 	if ((fd = open(path, O_RDONLY)) < 0)
1538 		return (NULL);
1539 
1540 	minor = NULL;
1541 	ret = NULL;
1542 	if (devid_get(fd, &devid) == 0) {
1543 		if (devid_get_minor_name(fd, &minor) == 0)
1544 			ret = devid_str_encode(devid, minor);
1545 		if (minor != NULL)
1546 			devid_str_free(minor);
1547 		devid_free(devid);
1548 	}
1549 	(void) close(fd);
1550 
1551 	return (ret);
1552 }
1553 
1554 /*
1555  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1556  * ignore any failure here, since a common case is for an unprivileged user to
1557  * type 'zpool status', and we'll display the correct information anyway.
1558  */
1559 static void
1560 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1561 {
1562 	zfs_cmd_t zc = { 0 };
1563 
1564 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1565 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1566 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1567 	    &zc.zc_guid) == 0);
1568 
1569 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1570 }
1571 
1572 /*
1573  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1574  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1575  * We also check if this is a whole disk, in which case we strip off the
1576  * trailing 's0' slice name.
1577  *
1578  * This routine is also responsible for identifying when disks have been
1579  * reconfigured in a new location.  The kernel will have opened the device by
1580  * devid, but the path will still refer to the old location.  To catch this, we
1581  * first do a path -> devid translation (which is fast for the common case).  If
1582  * the devid matches, we're done.  If not, we do a reverse devid -> path
1583  * translation and issue the appropriate ioctl() to update the path of the vdev.
1584  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1585  * of these checks.
1586  */
1587 char *
1588 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1589 {
1590 	char *path, *devid;
1591 	uint64_t value;
1592 	char buf[64];
1593 	vdev_stat_t *vs;
1594 	uint_t vsc;
1595 
1596 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1597 	    &value) == 0) {
1598 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1599 		    &value) == 0);
1600 		(void) snprintf(buf, sizeof (buf), "%llu",
1601 		    (u_longlong_t)value);
1602 		path = buf;
1603 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1604 
1605 		/*
1606 		 * If the device is dead (faulted, offline, etc) then don't
1607 		 * bother opening it.  Otherwise we may be forcing the user to
1608 		 * open a misbehaving device, which can have undesirable
1609 		 * effects.
1610 		 */
1611 		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_STATS,
1612 		    (uint64_t **)&vs, &vsc) != 0 ||
1613 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
1614 		    zhp != NULL &&
1615 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1616 			/*
1617 			 * Determine if the current path is correct.
1618 			 */
1619 			char *newdevid = path_to_devid(path);
1620 
1621 			if (newdevid == NULL ||
1622 			    strcmp(devid, newdevid) != 0) {
1623 				char *newpath;
1624 
1625 				if ((newpath = devid_to_path(devid)) != NULL) {
1626 					/*
1627 					 * Update the path appropriately.
1628 					 */
1629 					set_path(zhp, nv, newpath);
1630 					if (nvlist_add_string(nv,
1631 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1632 						verify(nvlist_lookup_string(nv,
1633 						    ZPOOL_CONFIG_PATH,
1634 						    &path) == 0);
1635 					free(newpath);
1636 				}
1637 			}
1638 
1639 			if (newdevid)
1640 				devid_str_free(newdevid);
1641 		}
1642 
1643 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1644 			path += 9;
1645 
1646 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1647 		    &value) == 0 && value) {
1648 			char *tmp = zfs_strdup(hdl, path);
1649 			if (tmp == NULL)
1650 				return (NULL);
1651 			tmp[strlen(path) - 2] = '\0';
1652 			return (tmp);
1653 		}
1654 	} else {
1655 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1656 
1657 		/*
1658 		 * If it's a raidz device, we need to stick in the parity level.
1659 		 */
1660 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1661 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1662 			    &value) == 0);
1663 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1664 			    (u_longlong_t)value);
1665 			path = buf;
1666 		}
1667 	}
1668 
1669 	return (zfs_strdup(hdl, path));
1670 }
1671 
1672 static int
1673 zbookmark_compare(const void *a, const void *b)
1674 {
1675 	return (memcmp(a, b, sizeof (zbookmark_t)));
1676 }
1677 
1678 /*
1679  * Retrieve the persistent error log, uniquify the members, and return to the
1680  * caller.
1681  */
1682 int
1683 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
1684 {
1685 	zfs_cmd_t zc = { 0 };
1686 	uint64_t count;
1687 	zbookmark_t *zb = NULL;
1688 	int i;
1689 
1690 	/*
1691 	 * Retrieve the raw error list from the kernel.  If the number of errors
1692 	 * has increased, allocate more space and continue until we get the
1693 	 * entire list.
1694 	 */
1695 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1696 	    &count) == 0);
1697 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1698 	    count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
1699 		return (-1);
1700 	zc.zc_nvlist_dst_size = count;
1701 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1702 	for (;;) {
1703 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1704 		    &zc) != 0) {
1705 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1706 			if (errno == ENOMEM) {
1707 				count = zc.zc_nvlist_dst_size;
1708 				if ((zc.zc_nvlist_dst = (uintptr_t)
1709 				    zfs_alloc(zhp->zpool_hdl, count *
1710 				    sizeof (zbookmark_t))) == (uintptr_t)NULL)
1711 					return (-1);
1712 			} else {
1713 				return (-1);
1714 			}
1715 		} else {
1716 			break;
1717 		}
1718 	}
1719 
1720 	/*
1721 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1722 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1723 	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1724 	 * _not_ copied as part of the process.  So we point the start of our
1725 	 * array appropriately and decrement the total number of elements.
1726 	 */
1727 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1728 	    zc.zc_nvlist_dst_size;
1729 	count -= zc.zc_nvlist_dst_size;
1730 
1731 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1732 
1733 	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
1734 
1735 	/*
1736 	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
1737 	 */
1738 	for (i = 0; i < count; i++) {
1739 		nvlist_t *nv;
1740 
1741 		/* ignoring zb_blkid and zb_level for now */
1742 		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
1743 		    zb[i-1].zb_object == zb[i].zb_object)
1744 			continue;
1745 
1746 		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
1747 			goto nomem;
1748 		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
1749 		    zb[i].zb_objset) != 0) {
1750 			nvlist_free(nv);
1751 			goto nomem;
1752 		}
1753 		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
1754 		    zb[i].zb_object) != 0) {
1755 			nvlist_free(nv);
1756 			goto nomem;
1757 		}
1758 		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
1759 			nvlist_free(nv);
1760 			goto nomem;
1761 		}
1762 		nvlist_free(nv);
1763 	}
1764 
1765 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1766 	return (0);
1767 
1768 nomem:
1769 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1770 	return (no_memory(zhp->zpool_hdl));
1771 }
1772 
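/*
 * Illustrative sketch (not part of the library): walking the error list
 * returned above and converting each <dataset, object> pair to a readable
 * path with zpool_obj_to_path() (defined later in this file), roughly what
 * 'zpool status -v' does.
 *
 *	nvlist_t *nverrlist, *nv;
 *	nvpair_t *elem = NULL;
 *	uint64_t dsobj, obj;
 *	char pathname[MAXPATHLEN * 2];
 *
 *	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
 *		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
 *			verify(nvpair_value_nvlist(elem, &nv) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
 *			    &dsobj) == 0);
 *			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
 *			    &obj) == 0);
 *			zpool_obj_to_path(zhp, dsobj, obj, pathname,
 *			    sizeof (pathname));
 *			(void) printf("%s\n", pathname);
 *		}
 *		nvlist_free(nverrlist);
 *	}
 */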
1773 /*
1774  * Upgrade a ZFS pool to the latest on-disk version.
1775  */
1776 int
1777 zpool_upgrade(zpool_handle_t *zhp)
1778 {
1779 	zfs_cmd_t zc = { 0 };
1780 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1781 
1782 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1783 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1784 		return (zpool_standard_error_fmt(hdl, errno,
1785 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1786 		    zhp->zpool_name));
1787 
1788 	return (0);
1789 }
1790 
1791 /*
1792  * Log command history.
1793  *
1794  * 'pool' is B_TRUE if we are logging a command for 'zpool'; B_FALSE
1795  * otherwise ('zfs').  'pool_create' is B_TRUE if we are logging the creation
1796  * of the pool; B_FALSE otherwise.  'path' is the pathname containing the
1797  * poolname.  'argc' and 'argv' are used to construct the command string.
1798  */
1799 void
1800 zpool_log_history(libzfs_handle_t *hdl, int argc, char **argv, const char *path,
1801 	boolean_t pool, boolean_t pool_create)
1802 {
1803 	char cmd_buf[HIS_MAX_RECORD_LEN];
1804 	char *dspath;
1805 	zfs_cmd_t zc = { 0 };
1806 	int i;
1807 
1808 	/* construct the command string */
1809 	(void) strcpy(cmd_buf, pool ? "zpool" : "zfs");
1810 	for (i = 0; i < argc; i++) {
1811 		if (strlen(cmd_buf) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN)
1812 			break;
1813 		(void) strcat(cmd_buf, " ");
1814 		(void) strcat(cmd_buf, argv[i]);
1815 	}
1816 
1817 	/* figure out the poolname */
1818 	dspath = strpbrk(path, "/@");
1819 	if (dspath == NULL) {
1820 		(void) strcpy(zc.zc_name, path);
1821 	} else {
1822 		(void) strncpy(zc.zc_name, path, dspath - path);
1823 		zc.zc_name[dspath-path] = '\0';
1824 	}
1825 
1826 	zc.zc_history = (uint64_t)(uintptr_t)cmd_buf;
1827 	zc.zc_history_len = strlen(cmd_buf);
1828 
1829 	/* overloading zc_history_offset */
1830 	zc.zc_history_offset = pool_create;
1831 
1832 	(void) ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_LOG_HISTORY, &zc);
1833 }
1834 
1835 /*
1836  * Perform ioctl to get some command history of a pool.
1837  *
1838  * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
1839  * logical offset of the history buffer to start reading from.
1840  *
1841  * Upon return, 'off' is the next logical offset to read from and
1842  * 'len' is the actual amount of bytes read into 'buf'.
1843  */
1844 static int
1845 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
1846 {
1847 	zfs_cmd_t zc = { 0 };
1848 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1849 
1850 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1851 
1852 	zc.zc_history = (uint64_t)(uintptr_t)buf;
1853 	zc.zc_history_len = *len;
1854 	zc.zc_history_offset = *off;
1855 
1856 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
1857 		switch (errno) {
1858 		case EPERM:
1859 			return (zfs_error_fmt(hdl, EZFS_PERM,
1860 			    dgettext(TEXT_DOMAIN,
1861 			    "cannot show history for pool '%s'"),
1862 			    zhp->zpool_name));
1863 		case ENOENT:
1864 			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
1865 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1866 			    "'%s'"), zhp->zpool_name));
1867 		case ENOTSUP:
1868 			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
1869 			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
1870 			    "'%s', pool must be upgraded"), zhp->zpool_name));
1871 		default:
1872 			return (zpool_standard_error_fmt(hdl, errno,
1873 			    dgettext(TEXT_DOMAIN,
1874 			    "cannot get history for '%s'"), zhp->zpool_name));
1875 		}
1876 	}
1877 
1878 	*len = zc.zc_history_len;
1879 	*off = zc.zc_history_offset;
1880 
1881 	return (0);
1882 }
1883 
1884 /*
1885  * Process the buffer of nvlists, unpacking and storing each nvlist record
1886  * into 'records'.  'leftover' is set to the number of bytes that weren't
1887  * processed as there wasn't a complete record.
1888  */
1889 static int
1890 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
1891     nvlist_t ***records, uint_t *numrecords)
1892 {
1893 	uint64_t reclen;
1894 	nvlist_t *nv;
1895 	int i;
1896 
1897 	while (bytes_read > sizeof (reclen)) {
1898 
1899 		/* get length of packed record (stored as little endian) */
1900 		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
1901 			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
1902 
1903 		if (bytes_read < sizeof (reclen) + reclen)
1904 			break;
1905 
1906 		/* unpack record */
1907 		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
1908 			return (ENOMEM);
1909 		bytes_read -= sizeof (reclen) + reclen;
1910 		buf += sizeof (reclen) + reclen;
1911 
1912 		/* add record to nvlist array */
1913 		(*numrecords)++;
1914 		if (ISP2(*numrecords + 1)) {
1915 			*records = realloc(*records,
1916 			    *numrecords * 2 * sizeof (nvlist_t *));
1917 		}
1918 		(*records)[*numrecords - 1] = nv;
1919 	}
1920 
1921 	*leftover = bytes_read;
1922 	return (0);
1923 }
1924 
1925 #define	HIS_BUF_LEN	(128*1024)
1926 
1927 /*
1928  * Retrieve the command history of a pool.
1929  */
1930 int
1931 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
1932 {
1933 	char buf[HIS_BUF_LEN];
1934 	uint64_t off = 0;
1935 	nvlist_t **records = NULL;
1936 	uint_t numrecords = 0;
1937 	int err, i;
1938 
1939 	do {
1940 		uint64_t bytes_read = sizeof (buf);
1941 		uint64_t leftover;
1942 
1943 		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
1944 			break;
1945 
1946 		/* if nothing else was read in, we're at EOF, just return */
1947 		if (!bytes_read)
1948 			break;
1949 
1950 		if ((err = zpool_history_unpack(buf, bytes_read,
1951 		    &leftover, &records, &numrecords)) != 0)
1952 			break;
1953 		off -= leftover;
1954 
1955 		/* CONSTCOND */
1956 	} while (1);
1957 
1958 	if (!err) {
1959 		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
1960 		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
1961 		    records, numrecords) == 0);
1962 	}
1963 	for (i = 0; i < numrecords; i++)
1964 		nvlist_free(records[i]);
1965 	free(records);
1966 
1967 	return (err);
1968 }
1969 
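/*
 * Illustrative sketch (not part of the library): printing each history
 * record, much as 'zpool history' does.  ZPOOL_HIST_TIME and
 * ZPOOL_HIST_CMD are assumed to be the record members defined in
 * sys/fs/zfs.h alongside ZPOOL_HIST_RECORD.
 *
 *	nvlist_t *nvhis, **records;
 *	uint_t nrecords, i;
 *	uint64_t tsec;
 *	char *cmd;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0) {
 *		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
 *		    &records, &nrecords) == 0);
 *		for (i = 0; i < nrecords; i++) {
 *			if (nvlist_lookup_uint64(records[i], ZPOOL_HIST_TIME,
 *			    &tsec) == 0 &&
 *			    nvlist_lookup_string(records[i], ZPOOL_HIST_CMD,
 *			    &cmd) == 0)
 *				(void) printf("%llu %s\n",
 *				    (u_longlong_t)tsec, cmd);
 *		}
 *		nvlist_free(nvhis);
 *	}
 */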
1970 void
1971 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
1972     char *pathname, size_t len)
1973 {
1974 	zfs_cmd_t zc = { 0 };
1975 	boolean_t mounted = B_FALSE;
1976 	char *mntpnt = NULL;
1977 	char dsname[MAXNAMELEN];
1978 
1979 	if (dsobj == 0) {
1980 		/* special case for the MOS */
1981 		(void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
1982 		return;
1983 	}
1984 
1985 	/* get the dataset's name */
1986 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1987 	zc.zc_obj = dsobj;
1988 	if (ioctl(zhp->zpool_hdl->libzfs_fd,
1989 	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
1990 		/* just write out a path of two object numbers */
1991 		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
1992 		    dsobj, obj);
1993 		return;
1994 	}
1995 	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
1996 
1997 	/* find out if the dataset is mounted */
1998 	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
1999 
2000 	/* get the corrupted object's path */
2001 	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
2002 	zc.zc_obj = obj;
2003 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
2004 	    &zc) == 0) {
2005 		if (mounted) {
2006 			(void) snprintf(pathname, len, "%s%s", mntpnt,
2007 			    zc.zc_value);
2008 		} else {
2009 			(void) snprintf(pathname, len, "%s:%s",
2010 			    dsname, zc.zc_value);
2011 		}
2012 	} else {
2013 		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
2014 	}
2015 	free(mntpnt);
2016 }
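
/*
 * Illustrative sketch (not part of libzfs): resolving a <dataset obj, object>
 * pair -- for instance one taken from the pool's persistent error log -- into
 * something printable.  The function name is hypothetical.
 */
static void
example_print_error_object(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj)
{
	char pathbuf[MAXPATHLEN];

	/* falls back to "<dataset>:<0xobj>" style strings when lookup fails */
	zpool_obj_to_path(zhp, dsobj, obj, pathbuf, sizeof (pathbuf));
	(void) printf("%s\n", pathbuf);
}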
2017 
2018 #define	RDISK_ROOT	"/dev/rdsk"
2019 #define	BACKUP_SLICE	"s2"
2020 /*
2021  * Don't start the slice at the default block of 34; many storage devices
2022  * use a 128K stripe width, so start at block 256 (256 * 512 bytes = 128K).
2023  */
2024 #define	NEW_START_BLOCK	256
2025 
2026 /*
2027  * Determine where a partition starts on a disk in the current
2028  * configuration.
2029  */
2030 static diskaddr_t
2031 find_start_block(nvlist_t *config)
2032 {
2033 	nvlist_t **child;
2034 	uint_t c, children;
2035 	char *path;
2036 	diskaddr_t sb = MAXOFFSET_T;
2037 	int fd;
2038 	char diskname[MAXPATHLEN];
2039 	uint64_t wholedisk;
2040 
2041 	if (nvlist_lookup_nvlist_array(config,
2042 	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
2043 		if (nvlist_lookup_uint64(config,
2044 		    ZPOOL_CONFIG_WHOLE_DISK,
2045 		    &wholedisk) != 0 || !wholedisk) {
2046 			return (MAXOFFSET_T);
2047 		}
2048 		if (nvlist_lookup_string(config,
2049 		    ZPOOL_CONFIG_PATH, &path) != 0) {
2050 			return (MAXOFFSET_T);
2051 		}
2052 
2053 		(void) snprintf(diskname, sizeof (diskname), "%s%s",
2054 		    RDISK_ROOT, strrchr(path, '/'));
2055 		if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
2056 			struct dk_gpt *vtoc;
2057 			if (efi_alloc_and_read(fd, &vtoc) >= 0) {
2058 				sb = vtoc->efi_parts[0].p_start;
2059 				efi_free(vtoc);
2060 			}
2061 			(void) close(fd);
2062 		}
2063 		return (sb);
2064 	}
2065 
2066 	for (c = 0; c < children; c++) {
2067 		sb = find_start_block(child[c]);
2068 		if (sb != MAXOFFSET_T) {
2069 			return (sb);
2070 		}
2071 	}
2072 	return (MAXOFFSET_T);
2073 }
2074 
2075 /*
2076  * Label an individual disk.  The name provided is the short name,
2077  * stripped of any leading /dev path.
2078  */
2079 int
2080 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
2081 {
2082 	char path[MAXPATHLEN];
2083 	struct dk_gpt *vtoc;
2084 	int fd;
2085 	size_t resv = EFI_MIN_RESV_SIZE;
2086 	uint64_t slice_size;
2087 	diskaddr_t start_block;
2088 	char errbuf[1024];
2089 
	/* prepare an error message in case any of the steps below fails */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

2090 	if (zhp) {
2091 		nvlist_t *nvroot;
2092 
2093 		verify(nvlist_lookup_nvlist(zhp->zpool_config,
2094 		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
2095 
2096 		if (zhp->zpool_start_block == 0)
2097 			start_block = find_start_block(nvroot);
2098 		else
2099 			start_block = zhp->zpool_start_block;
2100 		zhp->zpool_start_block = start_block;
2101 	} else {
2102 		/* new pool */
2103 		start_block = NEW_START_BLOCK;
2104 	}
2105 
2106 	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
2107 	    BACKUP_SLICE);
2108 
2109 	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2110 		/*
2111 		 * This shouldn't happen.  We've long since verified that this
2112 		 * is a valid device.
2113 		 */
2114 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2115 		    "unable to open device"));
2116 		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2117 	}
2118 
2119 	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
2120 		/*
2121 		 * The only way this can fail is if we run out of memory, or we
2122 		 * were unable to read the disk's capacity
2123 		 */
2124 		if (errno == ENOMEM)
2125 			(void) no_memory(hdl);
2126 
2127 		(void) close(fd);
2128 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2129 		    "unable to read disk capacity"));
2130 
2131 		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2132 	}
2133 
2134 	slice_size = vtoc->efi_last_u_lba + 1;
2135 	slice_size -= EFI_MIN_RESV_SIZE;
2136 	if (start_block == MAXOFFSET_T)
2137 		start_block = NEW_START_BLOCK;
2138 	slice_size -= start_block;
2139 
2140 	vtoc->efi_parts[0].p_start = start_block;
2141 	vtoc->efi_parts[0].p_size = slice_size;
2142 
2143 	/*
2144 	 * Why we use V_USR: V_BACKUP confuses users, and is considered
2145 	 * disposable by some EFI utilities (since EFI doesn't have a backup
2146 	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
2147 	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
2148 	 * etc. were all pretty specific.  V_USR is as close to reality as we
2149 	 * can get, in the absence of V_OTHER.
2150 	 */
2151 	vtoc->efi_parts[0].p_tag = V_USR;
2152 	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
2153 
2154 	vtoc->efi_parts[8].p_start = slice_size + start_block;
2155 	vtoc->efi_parts[8].p_size = resv;
2156 	vtoc->efi_parts[8].p_tag = V_RESERVED;
2157 
2158 	if (efi_write(fd, vtoc) != 0) {
2159 		/*
2160 		 * Some block drivers (like pcata) may not support EFI
2161 		 * GPT labels.  Print out a helpful error message directing
2162 		 * the user to manually label the disk and provide a
2163 		 * specific slice.
2164 		 */
2165 		(void) close(fd);
2166 		efi_free(vtoc);
2167 
2168 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2169 		    "cannot label '%s': try using fdisk(1M) and then "
2170 		    "provide a specific slice"), name);
2171 		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
2172 	}
2173 
2174 	(void) close(fd);
2175 	efi_free(vtoc);
2176 	return (0);
2177 }
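
/*
 * Illustrative sketch (not part of libzfs): labeling a disk destined for a
 * brand-new pool.  Passing a NULL zpool_handle_t selects the "new pool" case
 * above, so the data slice starts at NEW_START_BLOCK.  The function name and
 * the device name are hypothetical.
 */
static int
example_label_new_disk(libzfs_handle_t *hdl)
{
	char name[] = "c1t0d0";	/* short name, no /dev/rdsk/ prefix */

	return (zpool_label_disk(hdl, NULL, name));
}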
2178 
2179 int
2180 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
2181 {
2182 	zfs_cmd_t zc = { 0 };
2183 	int ret = -1;
2184 	char errbuf[1024];
2185 	nvlist_t *nvl = NULL;
2186 	nvlist_t *realprops;
2187 
2188 	(void) snprintf(errbuf, sizeof (errbuf),
2189 	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
2190 	    zhp->zpool_name);
2191 
2192 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2193 		zfs_error_aux(zhp->zpool_hdl,
2194 		    dgettext(TEXT_DOMAIN, "pool must be "
2195 		    "upgraded to support pool properties"));
2196 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, errbuf));
2197 	}
2198 
2199 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2200 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, errbuf));
2201 
2202 	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
2203 	    nvlist_add_string(nvl, propname, propval) != 0) {
2204 		return (no_memory(zhp->zpool_hdl));
2205 	}
2206 
2207 	if ((realprops = zfs_validate_properties(zhp->zpool_hdl, ZFS_TYPE_POOL,
2208 	    zhp->zpool_name, nvl, 0, NULL, errbuf)) == NULL) {
2209 		nvlist_free(nvl);
2210 		return (-1);
2211 	}
2212 
2213 	nvlist_free(nvl);
2214 	nvl = realprops;
2215 
2216 	/*
2217 	 * Execute the corresponding ioctl() to set this property.
2218 	 */
2219 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2220 
2221 	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl, NULL) != 0)
2222 		return (-1);
2223 
2224 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SET_PROPS, &zc);
2225 	zcmd_free_nvlists(&zc);
2226 
2227 	if (ret)
2228 		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
2229 
2230 	return (ret);
2231 }
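
/*
 * Illustrative sketch (not part of libzfs): setting the pool's bootfs
 * property.  The function name and the dataset used as a value are
 * hypothetical; the property name itself comes from zpool_prop_to_name().
 */
static int
example_set_bootfs(zpool_handle_t *zhp)
{
	return (zpool_set_prop(zhp, zpool_prop_to_name(ZPOOL_PROP_BOOTFS),
	    "tank/ROOT/snv_70"));
}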
2232 
2233 uint64_t
2234 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop)
2235 {
2236 	uint64_t value;
2237 	nvlist_t *nvp;
2238 
2239 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS)
2240 		return (0);
2241 
2242 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp))
2243 		return (zpool_prop_default_numeric(prop));
2244 
2245 	switch (prop) {
2246 	case ZPOOL_PROP_AUTOREPLACE:
2247 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2248 		    zpool_prop_to_name(prop), &nvp) != 0) {
2249 			value = zpool_prop_default_numeric(prop);
2250 		} else {
2251 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2252 			    &value) == 0);
2253 		}
2254 		return (value);
2256 
2257 	default:
2258 		assert(0);
2259 	}
2260 
2261 	return (0);
2262 }
2263 
2264 int
2265 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *propbuf,
2266     size_t proplen, zfs_source_t *srctype)
2267 {
2268 	uint64_t value;
2269 	char msg[1024], *strvalue;
2270 	nvlist_t *nvp;
2271 	zfs_source_t src = ZFS_SRC_NONE;
2272 
2273 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2274 	    "cannot get property '%s'"), zpool_prop_to_name(prop));
2275 
2276 	if (zpool_get_version(zhp) < ZFS_VERSION_BOOTFS) {
2277 		zfs_error_aux(zhp->zpool_hdl,
2278 		    dgettext(TEXT_DOMAIN, "pool must be "
2279 		    "upgraded to support pool properties"));
2280 		return (zfs_error(zhp->zpool_hdl, EZFS_BADVERSION, msg));
2281 	}
2282 
2283 	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
2284 	    prop != ZPOOL_PROP_NAME)
2285 		return (zfs_error(zhp->zpool_hdl, EZFS_POOLPROPS, msg));
2286 
2287 	switch (prop) {
2288 	case ZPOOL_PROP_NAME:
2289 		(void) strlcpy(propbuf, zhp->zpool_name, proplen);
2290 		break;
2291 
2292 	case ZPOOL_PROP_BOOTFS:
2293 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2294 		    zpool_prop_to_name(prop), &nvp) != 0) {
2295 			strvalue = (char *)zfs_prop_default_string(prop);
2296 			if (strvalue == NULL)
2297 				strvalue = "-";
2298 			src = ZFS_SRC_DEFAULT;
2299 		} else {
2300 			VERIFY(nvlist_lookup_uint64(nvp,
2301 			    ZFS_PROP_SOURCE, &value) == 0);
2302 			src = value;
2303 			VERIFY(nvlist_lookup_string(nvp, ZFS_PROP_VALUE,
2304 			    &strvalue) == 0);
2305 			if (strlen(strvalue) >= proplen)
2306 				return (-1);
2307 		}
2308 		(void) strlcpy(propbuf, strvalue, proplen);
2309 		break;
2310 
2311 	case ZPOOL_PROP_AUTOREPLACE:
2312 		if (nvlist_lookup_nvlist(zhp->zpool_props,
2313 		    zpool_prop_to_name(prop), &nvp) != 0) {
2314 			value = zpool_prop_default_numeric(prop);
2315 			src = ZFS_SRC_DEFAULT;
2316 		} else {
2317 			VERIFY(nvlist_lookup_uint64(nvp,
2318 			    ZFS_PROP_SOURCE, &value) == 0);
2319 			src = value;
2320 			VERIFY(nvlist_lookup_uint64(nvp, ZFS_PROP_VALUE,
2321 			    &value) == 0);
2322 		}
2323 		(void) strlcpy(propbuf, value ? "on" : "off", proplen);
2324 		break;
2325 
2326 	default:
2327 		return (-1);
2328 	}
2329 	if (srctype)
2330 		*srctype = src;
2331 	return (0);
2332 }
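
/*
 * Illustrative sketch (not part of libzfs): reading the autoreplace property
 * as a formatted string along with its source.  The function name is
 * hypothetical.
 */
static void
example_show_autoreplace(zpool_handle_t *zhp)
{
	char value[ZFS_MAXPROPLEN];
	zfs_source_t src;

	if (zpool_get_prop(zhp, ZPOOL_PROP_AUTOREPLACE, value,
	    sizeof (value), &src) == 0)
		(void) printf("autoreplace=%s (%s)\n", value,
		    src == ZFS_SRC_DEFAULT ? "default" : "local");
}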
2333 
2334 int
2335 zpool_get_proplist(libzfs_handle_t *hdl, char *fields, zpool_proplist_t **listp)
2336 {
2337 	return (zfs_get_proplist_common(hdl, fields, listp, ZFS_TYPE_POOL));
2338 }
2339 
2340 
2341 int
2342 zpool_expand_proplist(zpool_handle_t *zhp, zpool_proplist_t **plp)
2343 {
2344 	libzfs_handle_t *hdl = zhp->zpool_hdl;
2345 	zpool_proplist_t *entry;
2346 	char buf[ZFS_MAXPROPLEN];
2347 
2348 	if (zfs_expand_proplist_common(hdl, plp, ZFS_TYPE_POOL) != 0)
2349 		return (-1);
2350 
2351 	for (entry = *plp; entry != NULL; entry = entry->pl_next) {
2352 
2353 		if (entry->pl_fixed)
2354 			continue;
2355 
2356 		if (entry->pl_prop != ZFS_PROP_INVAL &&
2357 		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
2358 		    NULL) == 0) {
2359 			if (strlen(buf) > entry->pl_width)
2360 				entry->pl_width = strlen(buf);
2361 		}
2362 	}
2363 
2364 	return (0);
2365 }
2366
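/*
 * Illustrative sketch (not part of libzfs): building a display list the way a
 * "zpool list"-style consumer might -- parse a field list, expand it against
 * a pool, then use pl_width to size output columns.  The function name and
 * field list are hypothetical; freeing the list is omitted for brevity.
 */
static int
example_build_display_list(libzfs_handle_t *hdl, zpool_handle_t *zhp)
{
	char fields[] = "name,bootfs,autoreplace";
	zpool_proplist_t *pl = NULL, *entry;

	if (zpool_get_proplist(hdl, fields, &pl) != 0)
		return (-1);

	/* fill in pl_width for each non-fixed property on this pool */
	if (zpool_expand_proplist(zhp, &pl) != 0)
		return (-1);

	for (entry = pl; entry != NULL; entry = entry->pl_next)
		(void) printf("column width %lu\n", (ulong_t)entry->pl_width);

	return (0);
}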