xref: /illumos-gate/usr/src/lib/libzfs/common/libzfs_pool.c (revision e9dbad6f263d5570ed7ff5443ec5b958af8c24d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <assert.h>
30 #include <ctype.h>
31 #include <errno.h>
32 #include <devid.h>
33 #include <fcntl.h>
34 #include <libintl.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <unistd.h>
39 #include <sys/zfs_ioctl.h>
40 #include <sys/zio.h>
41 
42 #include "zfs_namecheck.h"
43 #include "libzfs_impl.h"
44 
45 /*
46  * Validate the given pool name, optionally setting an extended error
47  * message on 'hdl'.
48  */
49 static boolean_t
50 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
51 {
52 	namecheck_err_t why;
53 	char what;
54 	int ret;
55 
56 	ret = pool_namecheck(pool, &why, &what);
57 
58 	/*
59 	 * The rules for reserved pool names were extended at a later point.
60 	 * But we need to support users with existing pools that may now be
61 	 * invalid.  So we only check for this expanded set of names during a
62 	 * create (or import), and only in userland.
63 	 */
64 	if (ret == 0 && !isopen &&
65 	    (strncmp(pool, "mirror", 6) == 0 ||
66 	    strncmp(pool, "raidz", 5) == 0 ||
67 	    strncmp(pool, "spare", 5) == 0)) {
68 		zfs_error_aux(hdl,
69 		    dgettext(TEXT_DOMAIN, "name is reserved"));
70 		return (B_FALSE);
71 	}
72 
73 
74 	if (ret != 0) {
75 		if (hdl != NULL) {
76 			switch (why) {
77 			case NAME_ERR_TOOLONG:
78 				zfs_error_aux(hdl,
79 				    dgettext(TEXT_DOMAIN, "name is too long"));
80 				break;
81 
82 			case NAME_ERR_INVALCHAR:
83 				zfs_error_aux(hdl,
84 				    dgettext(TEXT_DOMAIN, "invalid character "
85 				    "'%c' in pool name"), what);
86 				break;
87 
88 			case NAME_ERR_NOLETTER:
89 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
90 				    "name must begin with a letter"));
91 				break;
92 
93 			case NAME_ERR_RESERVED:
94 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
95 				    "name is reserved"));
96 				break;
97 
98 			case NAME_ERR_DISKLIKE:
99 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
100 				    "pool name is reserved"));
101 				break;
102 			}
103 		}
104 		return (B_FALSE);
105 	}
106 
107 	return (B_TRUE);
108 }
109 
110 /*
111  * Set the pool-wide health based on the vdev state of the root vdev.
112  */
113 int
114 set_pool_health(nvlist_t *config)
115 {
116 	nvlist_t *nvroot;
117 	vdev_stat_t *vs;
118 	uint_t vsc;
119 	char *health;
120 
121 	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
122 	    &nvroot) == 0);
123 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
124 	    (uint64_t **)&vs, &vsc) == 0);
125 
126 	switch (vs->vs_state) {
127 
128 	case VDEV_STATE_CLOSED:
129 	case VDEV_STATE_CANT_OPEN:
130 	case VDEV_STATE_OFFLINE:
131 		health = dgettext(TEXT_DOMAIN, "FAULTED");
132 		break;
133 
134 	case VDEV_STATE_DEGRADED:
135 		health = dgettext(TEXT_DOMAIN, "DEGRADED");
136 		break;
137 
138 	case VDEV_STATE_HEALTHY:
139 		health = dgettext(TEXT_DOMAIN, "ONLINE");
140 		break;
141 
142 	default:
143 		abort();
144 	}
145 
146 	return (nvlist_add_string(config, ZPOOL_CONFIG_POOL_HEALTH, health));
147 }
148 
149 /*
150  * Open a handle to the given pool, even if the pool is currently in the FAULTED
151  * state.
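 *
 * A rough usage sketch (illustrative only; the pool name and error handling
 * are hypothetical):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(hdl, "tank")) != NULL) {
 *		if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL)
 *			(void) printf("pool is faulted\n");
 *		zpool_close(zhp);
 *	}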
152  */
153 zpool_handle_t *
154 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
155 {
156 	zpool_handle_t *zhp;
157 	boolean_t missing;
158 
159 	/*
160 	 * Make sure the pool name is valid.
161 	 */
162 	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
163 		(void) zfs_error(hdl, EZFS_INVALIDNAME,
164 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
165 		    pool);
166 		return (NULL);
167 	}
168 
169 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
170 		return (NULL);
171 
172 	zhp->zpool_hdl = hdl;
173 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
174 
175 	if (zpool_refresh_stats(zhp, &missing) != 0) {
176 		zpool_close(zhp);
177 		return (NULL);
178 	}
179 
180 	if (missing) {
181 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
182 		    "no such pool"));
183 		(void) zfs_error(hdl, EZFS_NOENT,
184 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
185 		    pool);
186 		zpool_close(zhp);
187 		return (NULL);
188 	}
189 
190 	return (zhp);
191 }
192 
193 /*
194  * Like the above, but silent on error.  Used when iterating over pools (because
195  * the configuration cache may be out of date).
196  */
197 int
198 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
199 {
200 	zpool_handle_t *zhp;
201 	boolean_t missing;
202 
203 	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
204 		return (-1);
205 
206 	zhp->zpool_hdl = hdl;
207 	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
208 
209 	if (zpool_refresh_stats(zhp, &missing) != 0) {
210 		zpool_close(zhp);
211 		return (-1);
212 	}
213 
214 	if (missing) {
215 		zpool_close(zhp);
216 		*ret = NULL;
217 		return (0);
218 	}
219 
220 	*ret = zhp;
221 	return (0);
222 }
223 
224 /*
225  * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
226  * state.
227  */
228 zpool_handle_t *
229 zpool_open(libzfs_handle_t *hdl, const char *pool)
230 {
231 	zpool_handle_t *zhp;
232 
233 	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
234 		return (NULL);
235 
236 	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
237 		(void) zfs_error(hdl, EZFS_POOLUNAVAIL,
238 		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
239 		zpool_close(zhp);
240 		return (NULL);
241 	}
242 
243 	return (zhp);
244 }
245 
246 /*
247  * Close the handle.  Simply frees the memory associated with the handle.
248  */
249 void
250 zpool_close(zpool_handle_t *zhp)
251 {
252 	if (zhp->zpool_config)
253 		nvlist_free(zhp->zpool_config);
254 	if (zhp->zpool_old_config)
255 		nvlist_free(zhp->zpool_old_config);
256 	if (zhp->zpool_error_log) {
257 		int i;
258 		for (i = 0; i < zhp->zpool_error_count; i++)
259 			nvlist_free(zhp->zpool_error_log[i]);
260 		free(zhp->zpool_error_log);
261 	}
262 	free(zhp);
263 }
264 
265 /*
266  * Return the name of the pool.
267  */
268 const char *
269 zpool_get_name(zpool_handle_t *zhp)
270 {
271 	return (zhp->zpool_name);
272 }
273 
274 /*
275  * Return the GUID of the pool.
276  */
277 uint64_t
278 zpool_get_guid(zpool_handle_t *zhp)
279 {
280 	uint64_t guid;
281 
282 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
283 	    &guid) == 0);
284 	return (guid);
285 }
286 
287 /*
288  * Return the version of the pool.
289  */
290 uint64_t
291 zpool_get_version(zpool_handle_t *zhp)
292 {
293 	uint64_t version;
294 
295 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_VERSION,
296 	    &version) == 0);
297 
298 	return (version);
299 }
300 
301 /*
302  * Return the amount of space currently consumed by the pool.
303  */
304 uint64_t
305 zpool_get_space_used(zpool_handle_t *zhp)
306 {
307 	nvlist_t *nvroot;
308 	vdev_stat_t *vs;
309 	uint_t vsc;
310 
311 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
312 	    &nvroot) == 0);
313 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
314 	    (uint64_t **)&vs, &vsc) == 0);
315 
316 	return (vs->vs_alloc);
317 }
318 
319 /*
320  * Return the total space in the pool.
321  */
322 uint64_t
323 zpool_get_space_total(zpool_handle_t *zhp)
324 {
325 	nvlist_t *nvroot;
326 	vdev_stat_t *vs;
327 	uint_t vsc;
328 
329 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
330 	    &nvroot) == 0);
331 	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_STATS,
332 	    (uint64_t **)&vs, &vsc) == 0);
333 
334 	return (vs->vs_space);
335 }
336 
337 /*
338  * Return the alternate root for this pool, if any.
339  */
340 int
341 zpool_get_root(zpool_handle_t *zhp, char *buf, size_t buflen)
342 {
343 	zfs_cmd_t zc = { 0 };
344 
345 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
346 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 ||
347 	    zc.zc_value[0] == '\0')
348 		return (-1);
349 
350 	(void) strlcpy(buf, zc.zc_value, buflen);
351 
352 	return (0);
353 }
354 
355 /*
356  * Return the state of the pool (ACTIVE or UNAVAILABLE)
357  */
358 int
359 zpool_get_state(zpool_handle_t *zhp)
360 {
361 	return (zhp->zpool_state);
362 }
363 
364 /*
365  * Create the named pool, using the provided vdev list.  It is assumed
366  * that the consumer has already validated the contents of the nvlist, so we
367  * don't have to worry about error semantics.
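 *
 * The 'nvroot' vdev specification is normally constructed by the zpool(1M)
 * command.  As a rough, illustrative sketch, a single-disk pool looks
 * something like this (device path hypothetical):
 *
 *	ZPOOL_CONFIG_TYPE = VDEV_TYPE_ROOT
 *	ZPOOL_CONFIG_CHILDREN = [
 *		{ ZPOOL_CONFIG_TYPE = VDEV_TYPE_DISK,
 *		  ZPOOL_CONFIG_PATH = "/dev/dsk/c0t0d0s0" }
 *	]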
368  */
369 int
370 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
371     const char *altroot)
372 {
373 	zfs_cmd_t zc = { 0 };
374 	char msg[1024];
375 
376 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
377 	    "cannot create '%s'"), pool);
378 
379 	if (!zpool_name_valid(hdl, B_FALSE, pool))
380 		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
381 
382 	if (altroot != NULL && altroot[0] != '/')
383 		return (zfs_error(hdl, EZFS_BADPATH,
384 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), altroot));
385 
386 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
387 		return (-1);
388 
389 	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
390 
391 	if (altroot != NULL)
392 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
393 
394 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_CREATE, &zc) != 0) {
395 		zcmd_free_nvlists(&zc);
396 
397 		switch (errno) {
398 		case EBUSY:
399 			/*
400 			 * This can happen if the user has specified the same
401 			 * device multiple times.  We can't reliably detect this
402 			 * until we try to add it and see we already have a
403 			 * label.
404 			 */
405 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
406 			    "one or more vdevs refer to the same device"));
407 			return (zfs_error(hdl, EZFS_BADDEV, msg));
408 
409 		case EOVERFLOW:
410 			/*
411 			 * This occurs when one of the devices is below
412 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
413 			 * device was the problem device since there's no
414 			 * reliable way to determine device size from userland.
415 			 */
416 			{
417 				char buf[64];
418 
419 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
420 
421 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
422 				    "one or more devices is less than the "
423 				    "minimum size (%s)"), buf);
424 			}
425 			return (zfs_error(hdl, EZFS_BADDEV, msg));
426 
427 		case ENOSPC:
428 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429 			    "one or more devices is out of space"));
430 			return (zfs_error(hdl, EZFS_BADDEV, msg));
431 
432 		default:
433 			return (zpool_standard_error(hdl, errno, msg));
434 		}
435 	}
436 
437 	zcmd_free_nvlists(&zc);
438 
439 	/*
440 	 * If this is an alternate root pool, then we automatically set the
441 	 * mountpoint of the root dataset to be '/'.
442 	 */
443 	if (altroot != NULL) {
444 		zfs_handle_t *zhp;
445 
446 		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_ANY)) != NULL);
447 		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
448 		    "/") == 0);
449 
450 		zfs_close(zhp);
451 	}
452 
453 	return (0);
454 }
455 
456 /*
457  * Destroy the given pool.  It is up to the caller to ensure that there are no
458  * datasets left in the pool.
459  */
460 int
461 zpool_destroy(zpool_handle_t *zhp)
462 {
463 	zfs_cmd_t zc = { 0 };
464 	zfs_handle_t *zfp = NULL;
465 	libzfs_handle_t *hdl = zhp->zpool_hdl;
466 	char msg[1024];
467 
468 	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
469 	    (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
470 	    ZFS_TYPE_FILESYSTEM)) == NULL)
471 		return (-1);
472 
473 	if (zpool_remove_zvol_links(zhp) != 0)
474 		return (-1);
475 
476 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
477 
478 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
479 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
480 		    "cannot destroy '%s'"), zhp->zpool_name);
481 
482 		if (errno == EROFS) {
483 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
484 			    "one or more devices is read only"));
485 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
486 		} else {
487 			(void) zpool_standard_error(hdl, errno, msg);
488 		}
489 
490 		if (zfp)
491 			zfs_close(zfp);
492 		return (-1);
493 	}
494 
495 	if (zfp) {
496 		remove_mountpoint(zfp);
497 		zfs_close(zfp);
498 	}
499 
500 	return (0);
501 }
502 
503 /*
504  * Add the given vdevs to the pool.  The caller must have already performed the
505  * necessary verification to ensure that the vdev specification is well-formed.
506  */
507 int
508 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
509 {
510 	zfs_cmd_t zc = { 0 };
511 	int ret;
512 	libzfs_handle_t *hdl = zhp->zpool_hdl;
513 	char msg[1024];
514 	nvlist_t **spares;
515 	uint_t nspares;
516 
517 	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
518 	    "cannot add to '%s'"), zhp->zpool_name);
519 
520 	if (zpool_get_version(zhp) < ZFS_VERSION_SPARES &&
521 	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
522 	    &spares, &nspares) == 0) {
523 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
524 		    "upgraded to add hot spares"));
525 		return (zfs_error(hdl, EZFS_BADVERSION, msg));
526 	}
527 
528 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
529 		return (-1);
530 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
531 
532 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ADD, &zc) != 0) {
533 		switch (errno) {
534 		case EBUSY:
535 			/*
536 			 * This can happen if the user has specified the same
537 			 * device multiple times.  We can't reliably detect this
538 			 * until we try to add it and see we already have a
539 			 * label.
540 			 */
541 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
542 			    "one or more vdevs refer to the same device"));
543 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
544 			break;
545 
546 		case EOVERFLOW:
547 			/*
548 			 * This occurs when one of the devices is below
549 			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
550 			 * device was the problem device since there's no
551 			 * reliable way to determine device size from userland.
552 			 */
553 			{
554 				char buf[64];
555 
556 				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
557 
558 				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
559 				    "device is less than the minimum "
560 				    "size (%s)"), buf);
561 			}
562 			(void) zfs_error(hdl, EZFS_BADDEV, msg);
563 			break;
564 
565 		case ENOTSUP:
566 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
567 			    "pool must be upgraded to add raidz2 vdevs"));
568 			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
569 			break;
570 
571 		default:
572 			(void) zpool_standard_error(hdl, errno, msg);
573 		}
574 
575 		ret = -1;
576 	} else {
577 		ret = 0;
578 	}
579 
580 	zcmd_free_nvlists(&zc);
581 
582 	return (ret);
583 }
584 
585 /*
586  * Export the pool from the system.  The caller must ensure that there are no
587  * mounted datasets in the pool.
588  */
589 int
590 zpool_export(zpool_handle_t *zhp)
591 {
592 	zfs_cmd_t zc = { 0 };
593 
594 	if (zpool_remove_zvol_links(zhp) != 0)
595 		return (-1);
596 
597 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
598 
599 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_EXPORT, &zc) != 0)
600 		return (zpool_standard_error(zhp->zpool_hdl, errno,
601 		    dgettext(TEXT_DOMAIN, "cannot export '%s'"),
602 		    zhp->zpool_name));
603 
604 	return (0);
605 }
606 
607 /*
608  * Import the given pool using the known configuration.  The configuration
609  * should have come from zpool_find_import().  The 'newname' and 'altroot'
610  * parameters allow the pool to be imported under a different name and/or
611  * with an alternate root, respectively.
612  */
613 int
614 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
615     const char *altroot)
616 {
617 	zfs_cmd_t zc = { 0 };
618 	char *thename;
619 	char *origname;
620 	int ret;
621 
622 	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
623 	    &origname) == 0);
624 
625 	if (newname != NULL) {
626 		if (!zpool_name_valid(hdl, B_FALSE, newname))
627 			return (zfs_error(hdl, EZFS_INVALIDNAME,
628 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
629 			    newname));
630 		thename = (char *)newname;
631 	} else {
632 		thename = origname;
633 	}
634 
635 	if (altroot != NULL && altroot[0] != '/')
636 		return (zfs_error(hdl, EZFS_BADPATH,
637 		    dgettext(TEXT_DOMAIN, "bad alternate root '%s'"),
638 		    altroot));
639 
640 	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
641 
642 	if (altroot != NULL)
643 		(void) strlcpy(zc.zc_value, altroot, sizeof (zc.zc_value));
644 	else
645 		zc.zc_value[0] = '\0';
646 
647 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
648 	    &zc.zc_guid) == 0);
649 
650 	if (zcmd_write_src_nvlist(hdl, &zc, config, NULL) != 0)
651 		return (-1);
652 
653 	ret = 0;
654 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_IMPORT, &zc) != 0) {
655 		char desc[1024];
656 		if (newname == NULL)
657 			(void) snprintf(desc, sizeof (desc),
658 			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
659 			    thename);
660 		else
661 			(void) snprintf(desc, sizeof (desc),
662 			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
663 			    origname, thename);
664 
665 		switch (errno) {
666 		case ENOTSUP:
667 			/*
668 			 * Unsupported version.
669 			 */
670 			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
671 			break;
672 
673 		case EINVAL:
674 			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
675 			break;
676 
677 		default:
678 			(void) zpool_standard_error(hdl, errno, desc);
679 		}
680 
681 		ret = -1;
682 	} else {
683 		zpool_handle_t *zhp;
684 		/*
685 		 * This should never fail, but play it safe anyway.
686 		 */
687 		if (zpool_open_silent(hdl, thename, &zhp) != 0) {
688 			ret = -1;
689 		} else if (zhp != NULL) {
690 			ret = zpool_create_zvol_links(zhp);
691 			zpool_close(zhp);
692 		}
693 	}
694 
695 	zcmd_free_nvlists(&zc);
696 	return (ret);
697 }
698 
699 /*
700  * Scrub the pool.
701  */
702 int
703 zpool_scrub(zpool_handle_t *zhp, pool_scrub_type_t type)
704 {
705 	zfs_cmd_t zc = { 0 };
706 	char msg[1024];
707 	libzfs_handle_t *hdl = zhp->zpool_hdl;
708 
709 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
710 	zc.zc_cookie = type;
711 
712 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_POOL_SCRUB, &zc) == 0)
713 		return (0);
714 
715 	(void) snprintf(msg, sizeof (msg),
716 	    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
717 
718 	if (errno == EBUSY)
719 		return (zfs_error(hdl, EZFS_RESILVERING, msg));
720 	else
721 		return (zpool_standard_error(hdl, errno, msg));
722 }
723 
724 /*
725  * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
726  * spare, and to FALSE if it refers to an INUSE spare.
727  */
728 static nvlist_t *
729 vdev_to_nvlist_iter(nvlist_t *nv, const char *search, uint64_t guid,
730     boolean_t *avail_spare)
731 {
732 	uint_t c, children;
733 	nvlist_t **child;
734 	uint64_t theguid, present;
735 	char *path;
736 	uint64_t wholedisk = 0;
737 	nvlist_t *ret;
738 
739 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0);
740 
741 	if (search == NULL &&
742 	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) {
743 		/*
744 		 * If the device has never been present since import, the only
745 		 * reliable way to match the vdev is by GUID.
746 		 */
747 		if (theguid == guid)
748 			return (nv);
749 	} else if (search != NULL &&
750 	    nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
751 		(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
752 		    &wholedisk);
753 		if (wholedisk) {
754 			/*
755 			 * For whole disks, the internal path has 's0', but the
756 			 * path passed in by the user doesn't.
757 			 */
758 			if (strlen(search) == strlen(path) - 2 &&
759 			    strncmp(search, path, strlen(search)) == 0)
760 				return (nv);
761 		} else if (strcmp(search, path) == 0) {
762 			return (nv);
763 		}
764 	}
765 
766 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
767 	    &child, &children) != 0)
768 		return (NULL);
769 
770 	for (c = 0; c < children; c++)
771 		if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
772 		    avail_spare)) != NULL)
773 			return (ret);
774 
775 	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
776 	    &child, &children) == 0) {
777 		for (c = 0; c < children; c++) {
778 			if ((ret = vdev_to_nvlist_iter(child[c], search, guid,
779 			    avail_spare)) != NULL) {
780 				*avail_spare = B_TRUE;
781 				return (ret);
782 			}
783 		}
784 	}
785 
786 	return (NULL);
787 }
788 
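/*
 * Locate a vdev in the pool's configuration.  'path' may be a vdev GUID, a
 * device name (which gets "/dev/dsk/" prepended), or a full path.  On
 * success the matching nvlist is returned and '*avail_spare' indicates
 * whether the device is an available hot spare.
 */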
789 nvlist_t *
790 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare)
791 {
792 	char buf[MAXPATHLEN];
793 	const char *search;
794 	char *end;
795 	nvlist_t *nvroot;
796 	uint64_t guid;
797 
798 	guid = strtoull(path, &end, 10);
799 	if (guid != 0 && *end == '\0') {
800 		search = NULL;
801 	} else if (path[0] != '/') {
802 		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
803 		search = buf;
804 	} else {
805 		search = path;
806 	}
807 
808 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
809 	    &nvroot) == 0);
810 
811 	*avail_spare = B_FALSE;
812 	return (vdev_to_nvlist_iter(nvroot, search, guid, avail_spare));
813 }
814 
815 /*
816  * Returns TRUE if the given guid corresponds to a spare (INUSE or not).
817  */
818 static boolean_t
819 is_spare(zpool_handle_t *zhp, uint64_t guid)
820 {
821 	uint64_t spare_guid;
822 	nvlist_t *nvroot;
823 	nvlist_t **spares;
824 	uint_t nspares;
825 	int i;
826 
827 	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
828 	    &nvroot) == 0);
829 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
830 	    &spares, &nspares) == 0) {
831 		for (i = 0; i < nspares; i++) {
832 			verify(nvlist_lookup_uint64(spares[i],
833 			    ZPOOL_CONFIG_GUID, &spare_guid) == 0);
834 			if (guid == spare_guid)
835 				return (B_TRUE);
836 		}
837 	}
838 
839 	return (B_FALSE);
840 }
841 
842 /*
843  * Bring the specified vdev online
844  */
845 int
846 zpool_vdev_online(zpool_handle_t *zhp, const char *path)
847 {
848 	zfs_cmd_t zc = { 0 };
849 	char msg[1024];
850 	nvlist_t *tgt;
851 	boolean_t avail_spare;
852 	libzfs_handle_t *hdl = zhp->zpool_hdl;
853 
854 	(void) snprintf(msg, sizeof (msg),
855 	    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
856 
857 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
858 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
859 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
860 
861 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
862 
863 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
864 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
865 
866 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ONLINE, &zc) == 0)
867 		return (0);
868 
869 	return (zpool_standard_error(hdl, errno, msg));
870 }
871 
872 /*
873  * Take the specified vdev offline
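 * If 'istmp' is set, the offline state does not persist across reboots
 * (the equivalent of 'zpool offline -t').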
874  */
875 int
876 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, int istmp)
877 {
878 	zfs_cmd_t zc = { 0 };
879 	char msg[1024];
880 	nvlist_t *tgt;
881 	boolean_t avail_spare;
882 	libzfs_handle_t *hdl = zhp->zpool_hdl;
883 
884 	(void) snprintf(msg, sizeof (msg),
885 	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
886 
887 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
888 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
889 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
890 
891 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
892 
893 	if (avail_spare || is_spare(zhp, zc.zc_guid) == B_TRUE)
894 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
895 
896 	zc.zc_cookie = istmp;
897 
898 	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_OFFLINE, &zc) == 0)
899 		return (0);
900 
901 	switch (errno) {
902 	case EBUSY:
903 
904 		/*
905 		 * There are no other replicas of this device.
906 		 */
907 		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
908 
909 	default:
910 		return (zpool_standard_error(hdl, errno, msg));
911 	}
912 }
913 
914 /*
915  * Returns TRUE if 'tgt' appears as child 'which' of a spare vdev anywhere
916  * within 'search', i.e. it is part of an active hot spare replacement.
917  */
918 static boolean_t
919 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
920 {
921 	nvlist_t **child;
922 	uint_t c, children;
923 	char *type;
924 
925 	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
926 	    &children) == 0) {
927 		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
928 		    &type) == 0);
929 
930 		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
931 		    children == 2 && child[which] == tgt)
932 			return (B_TRUE);
933 
934 		for (c = 0; c < children; c++)
935 			if (is_replacing_spare(child[c], tgt, which))
936 				return (B_TRUE);
937 	}
938 
939 	return (B_FALSE);
940 }
941 
942 /*
943  * Attach new_disk (fully described by nvroot) to old_disk.
944  * If 'replacing' is specified, the new disk will replace the old one.
945  */
946 int
947 zpool_vdev_attach(zpool_handle_t *zhp,
948     const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
949 {
950 	zfs_cmd_t zc = { 0 };
951 	char msg[1024];
952 	int ret;
953 	nvlist_t *tgt;
954 	boolean_t avail_spare;
955 	uint64_t val;
956 	char *path;
957 	nvlist_t **child;
958 	uint_t children;
959 	nvlist_t *config_root;
960 	libzfs_handle_t *hdl = zhp->zpool_hdl;
961 
962 	if (replacing)
963 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
964 		    "cannot replace %s with %s"), old_disk, new_disk);
965 	else
966 		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
967 		    "cannot attach %s to %s"), new_disk, old_disk);
968 
969 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
970 	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare)) == NULL)
971 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
972 
973 	if (avail_spare)
974 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
975 
976 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
977 	zc.zc_cookie = replacing;
978 
979 	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
980 	    &child, &children) != 0 || children != 1) {
981 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
982 		    "new device must be a single disk"));
983 		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
984 	}
985 
986 	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
987 	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
988 
989 	/*
990 	 * If the target is a hot spare that has been swapped in, we can only
991 	 * replace it with another hot spare.
992 	 */
993 	if (replacing &&
994 	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
995 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
996 	    (zpool_find_vdev(zhp, path, &avail_spare) == NULL ||
997 	    !avail_spare) && is_replacing_spare(config_root, tgt, 1)) {
998 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
999 		    "can only be replaced by another hot spare"));
1000 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1001 	}
1002 
1003 	/*
1004 	 * If we are attempting to replace a device with a hot spare, it cannot
1005 	 * be applied to a device that has already been replaced by a spare.
1006 	 */
1007 	if (replacing &&
1008 	    nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 &&
1009 	    zpool_find_vdev(zhp, path, &avail_spare) != NULL && avail_spare &&
1010 	    is_replacing_spare(config_root, tgt, 0)) {
1011 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1012 		    "device has already been replaced with a spare"));
1013 		return (zfs_error(hdl, EZFS_BADTARGET, msg));
1014 	}
1015 
1016 	if (zcmd_write_src_nvlist(hdl, &zc, nvroot, NULL) != 0)
1017 		return (-1);
1018 
1019 	ret = ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_ATTACH, &zc);
1020 
1021 	zcmd_free_nvlists(&zc);
1022 
1023 	if (ret == 0)
1024 		return (0);
1025 
1026 	switch (errno) {
1027 	case ENOTSUP:
1028 		/*
1029 		 * Can't attach to or replace this type of vdev.
1030 		 */
1031 		if (replacing)
1032 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1033 			    "cannot replace a replacing device"));
1034 		else
1035 			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1036 			    "can only attach to mirrors and top-level "
1037 			    "disks"));
1038 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1039 		break;
1040 
1041 	case EINVAL:
1042 		/*
1043 		 * The new device must be a single disk.
1044 		 */
1045 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1046 		    "new device must be a single disk"));
1047 		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
1048 		break;
1049 
1050 	case EBUSY:
1051 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
1052 		    new_disk);
1053 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1054 		break;
1055 
1056 	case EOVERFLOW:
1057 		/*
1058 		 * The new device is too small.
1059 		 */
1060 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1061 		    "device is too small"));
1062 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1063 		break;
1064 
1065 	case EDOM:
1066 		/*
1067 		 * The new device has a different alignment requirement.
1068 		 */
1069 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1070 		    "devices have different sector alignment"));
1071 		(void) zfs_error(hdl, EZFS_BADDEV, msg);
1072 		break;
1073 
1074 	case ENAMETOOLONG:
1075 		/*
1076 		 * The resulting top-level vdev spec won't fit in the label.
1077 		 */
1078 		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
1079 		break;
1080 
1081 	default:
1082 		(void) zpool_standard_error(hdl, errno, msg);
1083 	}
1084 
1085 	return (-1);
1086 }
1087 
1088 /*
1089  * Detach the specified device.
1090  */
1091 int
1092 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
1093 {
1094 	zfs_cmd_t zc = { 0 };
1095 	char msg[1024];
1096 	nvlist_t *tgt;
1097 	boolean_t avail_spare;
1098 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1099 
1100 	(void) snprintf(msg, sizeof (msg),
1101 	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
1102 
1103 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1104 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1105 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1106 
1107 	if (avail_spare)
1108 		return (zfs_error(hdl, EZFS_ISSPARE, msg));
1109 
1110 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1111 
1112 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_DETACH, &zc) == 0)
1113 		return (0);
1114 
1115 	switch (errno) {
1116 
1117 	case ENOTSUP:
1118 		/*
1119 		 * Can't detach from this type of vdev.
1120 		 */
1121 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
1122 		    "applicable to mirror and replacing vdevs"));
1123 		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
1124 		break;
1125 
1126 	case EBUSY:
1127 		/*
1128 		 * There are no other replicas of this device.
1129 		 */
1130 		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
1131 		break;
1132 
1133 	default:
1134 		(void) zpool_standard_error(hdl, errno, msg);
1135 	}
1136 
1137 	return (-1);
1138 }
1139 
1140 /*
1141  * Remove the given device.  Currently, this is supported only for hot spares.
1142  */
1143 int
1144 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
1145 {
1146 	zfs_cmd_t zc = { 0 };
1147 	char msg[1024];
1148 	nvlist_t *tgt;
1149 	boolean_t avail_spare;
1150 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1151 
1152 	(void) snprintf(msg, sizeof (msg),
1153 	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
1154 
1155 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1156 	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1157 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1158 
1159 	if (!avail_spare) {
1160 		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1161 		    "only hot spares can be removed"));
1162 		return (zfs_error(hdl, EZFS_NODEVICE, msg));
1163 	}
1164 
1165 	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
1166 
1167 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
1168 		return (0);
1169 
1170 	return (zpool_standard_error(hdl, errno, msg));
1171 }
1172 
1173 /*
1174  * Clear the errors for the pool, or the particular device if specified.
1175  */
1176 int
1177 zpool_clear(zpool_handle_t *zhp, const char *path)
1178 {
1179 	zfs_cmd_t zc = { 0 };
1180 	char msg[1024];
1181 	nvlist_t *tgt;
1182 	boolean_t avail_spare;
1183 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1184 
1185 	if (path)
1186 		(void) snprintf(msg, sizeof (msg),
1187 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1188 		    path);
1189 	else
1190 		(void) snprintf(msg, sizeof (msg),
1191 		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
1192 		    zhp->zpool_name);
1193 
1194 	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1195 	if (path) {
1196 		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare)) == NULL)
1197 			return (zfs_error(hdl, EZFS_NODEVICE, msg));
1198 
1199 		if (avail_spare)
1200 			return (zfs_error(hdl, EZFS_ISSPARE, msg));
1201 
1202 		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
1203 		    &zc.zc_guid) == 0);
1204 	}
1205 
1206 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
1207 		return (0);
1208 
1209 	return (zpool_standard_error(hdl, errno, msg));
1210 }
1211 
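/*
 * Callback for zfs_iter_children():  create or remove the /dev/zvol minor
 * node links for a volume, then recurse into its children.  'data' encodes
 * whether links are being created (B_TRUE) or removed (B_FALSE).
 */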
1212 static int
1213 do_zvol(zfs_handle_t *zhp, void *data)
1214 {
1215 	int linktype = (int)(uintptr_t)data;
1216 	int ret;
1217 
1218 	/*
1219 	 * We check for volblocksize instead of ZFS_TYPE_VOLUME so that we
1220 	 * correctly handle snapshots of volumes.
1221 	 */
1222 	if (ZFS_IS_VOLUME(zhp)) {
1223 		if (linktype)
1224 			ret = zvol_create_link(zhp->zfs_hdl, zhp->zfs_name);
1225 		else
1226 			ret = zvol_remove_link(zhp->zfs_hdl, zhp->zfs_name);
1227 	}
1228 
1229 	ret = zfs_iter_children(zhp, do_zvol, data);
1230 
1231 	zfs_close(zhp);
1232 	return (ret);
1233 }
1234 
1235 /*
1236  * Iterate over all zvols in the pool and make any necessary minor nodes.
1237  */
1238 int
1239 zpool_create_zvol_links(zpool_handle_t *zhp)
1240 {
1241 	zfs_handle_t *zfp;
1242 	int ret;
1243 
1244 	/*
1245 	 * If the pool is unavailable, just return success.
1246 	 */
1247 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1248 	    zhp->zpool_name)) == NULL)
1249 		return (0);
1250 
1251 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_TRUE);
1252 
1253 	zfs_close(zfp);
1254 	return (ret);
1255 }
1256 
1257 /*
1258  * Iterate over all zvols in the pool and remove any minor nodes.
1259  */
1260 int
1261 zpool_remove_zvol_links(zpool_handle_t *zhp)
1262 {
1263 	zfs_handle_t *zfp;
1264 	int ret;
1265 
1266 	/*
1267 	 * If the pool is unavailable, just return success.
1268 	 */
1269 	if ((zfp = make_dataset_handle(zhp->zpool_hdl,
1270 	    zhp->zpool_name)) == NULL)
1271 		return (0);
1272 
1273 	ret = zfs_iter_children(zfp, do_zvol, (void *)B_FALSE);
1274 
1275 	zfs_close(zfp);
1276 	return (ret);
1277 }
1278 
1279 /*
1280  * Convert from a devid string to a path.
1281  */
1282 static char *
1283 devid_to_path(char *devid_str)
1284 {
1285 	ddi_devid_t devid;
1286 	char *minor;
1287 	char *path;
1288 	devid_nmlist_t *list = NULL;
1289 	int ret;
1290 
1291 	if (devid_str_decode(devid_str, &devid, &minor) != 0)
1292 		return (NULL);
1293 
1294 	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
1295 
1296 	devid_str_free(minor);
1297 	devid_free(devid);
1298 
1299 	if (ret != 0)
1300 		return (NULL);
1301 
1302 	if ((path = strdup(list[0].devname)) == NULL)
1303 		return (NULL);
1304 
1305 	devid_free_nmlist(list);
1306 
1307 	return (path);
1308 }
1309 
1310 /*
1311  * Convert from a path to a devid string.
1312  */
1313 static char *
1314 path_to_devid(const char *path)
1315 {
1316 	int fd;
1317 	ddi_devid_t devid;
1318 	char *minor, *ret;
1319 
1320 	if ((fd = open(path, O_RDONLY)) < 0)
1321 		return (NULL);
1322 
1323 	minor = NULL;
1324 	ret = NULL;
1325 	if (devid_get(fd, &devid) == 0) {
1326 		if (devid_get_minor_name(fd, &minor) == 0)
1327 			ret = devid_str_encode(devid, minor);
1328 		if (minor != NULL)
1329 			devid_str_free(minor);
1330 		devid_free(devid);
1331 	}
1332 	(void) close(fd);
1333 
1334 	return (ret);
1335 }
1336 
1337 /*
1338  * Issue the necessary ioctl() to update the stored path value for the vdev.  We
1339  * ignore any failure here, since a common case is for an unprivileged user to
1340  * type 'zpool status', and we'll display the correct information anyway.
1341  */
1342 static void
1343 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
1344 {
1345 	zfs_cmd_t zc = { 0 };
1346 
1347 	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1348 	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
1349 	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1350 	    &zc.zc_guid) == 0);
1351 
1352 	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
1353 }
1354 
1355 /*
1356  * Given a vdev, return the name to display in iostat.  If the vdev has a path,
1357  * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
1358  * We also check if this is a whole disk, in which case we strip off the
1359  * trailing 's0' slice name.
1360  *
1361  * This routine is also responsible for identifying when disks have been
1362  * reconfigured in a new location.  The kernel will have opened the device by
1363  * devid, but the path will still refer to the old location.  To catch this, we
1364  * first do a path -> devid translation (which is fast for the common case).  If
1365  * the devid matches, we're done.  If not, we do a reverse devid -> path
1366  * translation and issue the appropriate ioctl() to update the path of the vdev.
1367  * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
1368  * of these checks.
1369  */
1370 char *
1371 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
1372 {
1373 	char *path, *devid;
1374 	uint64_t value;
1375 	char buf[64];
1376 
1377 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
1378 	    &value) == 0) {
1379 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1380 		    &value) == 0);
1381 		(void) snprintf(buf, sizeof (buf), "%llu", value);
1382 		path = buf;
1383 	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
1384 
1385 		if (zhp != NULL &&
1386 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
1387 			/*
1388 			 * Determine if the current path is correct.
1389 			 */
1390 			char *newdevid = path_to_devid(path);
1391 
1392 			if (newdevid == NULL ||
1393 			    strcmp(devid, newdevid) != 0) {
1394 				char *newpath;
1395 
1396 				if ((newpath = devid_to_path(devid)) != NULL) {
1397 					/*
1398 					 * Update the path appropriately.
1399 					 */
1400 					set_path(zhp, nv, newpath);
1401 					if (nvlist_add_string(nv,
1402 					    ZPOOL_CONFIG_PATH, newpath) == 0)
1403 						verify(nvlist_lookup_string(nv,
1404 						    ZPOOL_CONFIG_PATH,
1405 						    &path) == 0);
1406 					free(newpath);
1407 				}
1408 			}
1409 
1410 			if (newdevid)
1411 				devid_str_free(newdevid);
1412 		}
1413 
1414 		if (strncmp(path, "/dev/dsk/", 9) == 0)
1415 			path += 9;
1416 
1417 		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1418 		    &value) == 0 && value) {
1419 			char *tmp = zfs_strdup(hdl, path);
1420 			if (tmp == NULL)
1421 				return (NULL);
1422 			tmp[strlen(path) - 2] = '\0';
1423 			return (tmp);
1424 		}
1425 	} else {
1426 		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
1427 
1428 		/*
1429 		 * If it's a raidz device, we need to stick in the parity level.
1430 		 */
1431 		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
1432 			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
1433 			    &value) == 0);
1434 			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
1435 			    value);
1436 			path = buf;
1437 		}
1438 	}
1439 
1440 	return (zfs_strdup(hdl, path));
1441 }
1442 
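/*
 * Comparison function used to sort and uniquify the raw bookmark array
 * returned by ZFS_IOC_ERROR_LOG.
 */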
1443 static int
1444 zbookmark_compare(const void *a, const void *b)
1445 {
1446 	return (memcmp(a, b, sizeof (zbookmark_t)));
1447 }
1448 
1449 /*
1450  * Retrieve the persistent error log, uniquify the members, and return to the
1451  * caller.
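 *
 * Passing a NULL 'list' returns only the number of unique errors in '*nelem'.
 * A rough usage sketch (illustrative only):
 *
 *	nvlist_t **errlist;
 *	size_t nerr;
 *
 *	if (zpool_get_errlog(zhp, &errlist, &nerr) == 0) {
 *		... examine errlist[0] through errlist[nerr - 1] ...
 *	}
 *
 * The resulting array is cached in the handle and freed by zpool_close().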
1452  */
1453 int
1454 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t ***list, size_t *nelem)
1455 {
1456 	zfs_cmd_t zc = { 0 };
1457 	uint64_t count;
1458 	zbookmark_t *zb = NULL;
1459 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1460 	int i, j;
1461 
1462 	if (zhp->zpool_error_log != NULL) {
1463 		*list = zhp->zpool_error_log;
1464 		*nelem = zhp->zpool_error_count;
1465 		return (0);
1466 	}
1467 
1468 	/*
1469 	 * Retrieve the raw error list from the kernel.  If the number of errors
1470 	 * has increased, allocate more space and continue until we get the
1471 	 * entire list.
1472 	 */
1473 	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
1474 	    &count) == 0);
1475 	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
1476 	    count * sizeof (zbookmark_t))) == NULL)
1477 		return (-1);
1478 	zc.zc_nvlist_dst_size = count;
1479 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1480 	for (;;) {
1481 		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
1482 		    &zc) != 0) {
1483 			free((void *)(uintptr_t)zc.zc_nvlist_dst);
1484 			if (errno == ENOMEM) {
1485 				if ((zc.zc_nvlist_dst = (uintptr_t)
1486 				    zfs_alloc(zhp->zpool_hdl,
1487 				    zc.zc_nvlist_dst_size)) == NULL)
1488 					return (-1);
1489 			} else {
1490 				return (-1);
1491 			}
1492 		} else {
1493 			break;
1494 		}
1495 	}
1496 
1497 	/*
1498 	 * Sort the resulting bookmarks.  This is a little confusing due to the
1499 	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
1500  * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
1501  * _not_ copied as part of the process.  So we point the start of our
1502  * array appropriately and decrement the total number of elements.
1503 	 */
1504 	zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
1505 	    zc.zc_nvlist_dst_size;
1506 	count -= zc.zc_nvlist_dst_size;
1507 	zc.zc_nvlist_dst = 0ULL;
1508 
1509 	qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
1510 
1511 	/*
1512 	 * Count the number of unique elements
1513 	 */
1514 	j = 0;
1515 	for (i = 0; i < count; i++) {
1516 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1517 		    sizeof (zbookmark_t)) == 0)
1518 			continue;
1519 		j++;
1520 	}
1521 
1522 	/*
1523 	 * If the user has only requested the number of items, return it now
1524 	 * without bothering with the extra work.
1525 	 */
1526 	if (list == NULL) {
1527 		*nelem = j;
1528 		free((void *)(uintptr_t)zc.zc_nvlist_dst);
1529 		return (0);
1530 	}
1531 
1532 	zhp->zpool_error_count = j;
1533 
1534 	/*
1535 	 * Allocate an array of nvlists to hold the results
1536 	 */
1537 	if ((zhp->zpool_error_log = zfs_alloc(zhp->zpool_hdl,
1538 	    j * sizeof (nvlist_t *))) == NULL) {
1539 		free((void *)(uintptr_t)zc.zc_nvlist_dst);
1540 		return (-1);
1541 	}
1542 
1543 	/*
1544 	 * Fill in the results with names from the kernel.
1545 	 */
1546 	j = 0;
1547 	for (i = 0; i < count; i++) {
1548 		char buf[64];
1549 		nvlist_t *nv;
1550 
1551 		if (i > 0 && memcmp(&zb[i - 1], &zb[i],
1552 		    sizeof (zbookmark_t)) == 0)
1553 			continue;
1554 
1555 		if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
1556 			goto nomem;
1557 
1558 		zc.zc_bookmark = zb[i];
1559 		for (;;) {
1560 			if (ioctl(zhp->zpool_hdl->libzfs_fd,
1561 			    ZFS_IOC_BOOKMARK_NAME, &zc) != 0) {
1562 				if (errno == ENOMEM) {
1563 					if (zcmd_expand_dst_nvlist(hdl, &zc)
1564 					    != 0) {
1565 						zcmd_free_nvlists(&zc);
1566 						goto nomem;
1567 					}
1568 
1569 					continue;
1570 				} else {
1571 					if (nvlist_alloc(&nv, NV_UNIQUE_NAME,
1572 					    0) != 0)
1573 						goto nomem;
1574 
1575 					zhp->zpool_error_log[j] = nv;
1576 					(void) snprintf(buf, sizeof (buf),
1577 					    "%llx", zb[i].zb_objset);
1578 					if (nvlist_add_string(nv,
1579 					    ZPOOL_ERR_DATASET, buf) != 0)
1580 						goto nomem;
1581 					(void) snprintf(buf, sizeof (buf),
1582 					    "%llx", zb[i].zb_object);
1583 					if (nvlist_add_string(nv,
1584 					    ZPOOL_ERR_OBJECT, buf) != 0)
1585 						goto nomem;
1586 					(void) snprintf(buf, sizeof (buf),
1587 					    "lvl=%u blkid=%llu",
1588 					    (int)zb[i].zb_level,
1589 					    (long long)zb[i].zb_blkid);
1590 					if (nvlist_add_string(nv,
1591 					    ZPOOL_ERR_RANGE, buf) != 0)
1592 						goto nomem;
1593 				}
1594 			} else {
1595 				if (zcmd_read_dst_nvlist(hdl, &zc,
1596 				    &zhp->zpool_error_log[j]) != 0) {
1597 					zcmd_free_nvlists(&zc);
1598 					goto nomem;
1599 				}
1600 			}
1601 
1602 			break;
1603 		}
1604 
1605 		zcmd_free_nvlists(&zc);
1606 
1607 		j++;
1608 	}
1609 
1610 	*list = zhp->zpool_error_log;
1611 	*nelem = zhp->zpool_error_count;
1612 	free(zb);
1613 
1614 	return (0);
1615 
1616 nomem:
1617 	free(zb);
1618 	free((void *)(uintptr_t)zc.zc_nvlist_dst);
1619 	for (i = 0; i < zhp->zpool_error_count; i++)
1620 		nvlist_free(zhp->zpool_error_log[i]);
1621 	free(zhp->zpool_error_log);
1622 	zhp->zpool_error_log = NULL;
1623 	return (no_memory(zhp->zpool_hdl));
1624 }
1625 
1626 /*
1627  * Upgrade a ZFS pool to the latest on-disk version.
1628  */
1629 int
1630 zpool_upgrade(zpool_handle_t *zhp)
1631 {
1632 	zfs_cmd_t zc = { 0 };
1633 	libzfs_handle_t *hdl = zhp->zpool_hdl;
1634 
1635 	(void) strcpy(zc.zc_name, zhp->zpool_name);
1636 	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
1637 		return (zpool_standard_error(hdl, errno,
1638 		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
1639 		    zhp->zpool_name));
1640 
1641 	return (0);
1642 }
1643