/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 * Copyright 2020 Joyent, Inc.
 */

/*
 * Pool import support functions.
 *
 * Used by zpool, ztest, zdb, and zhack to locate importable configs.  Since
 * these commands are expected to run in the global zone, we can assume
 * that the devices are all readable when called.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */
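
/*
 * As an illustration (the values are made up), two label copies of the same
 * top-level vdev might yield the tuples (pool 0x1234, vdev 0xabcd, txg 90)
 * and (pool 0x1234, vdev 0xabcd, txg 100); both are recorded because the
 * txgs differ, and the txg-100 config is later chosen as the best one for
 * that vdev.
 */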

#include <stdio.h>
#include <stdarg.h>
#include <assert.h>
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>

#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>

#include "zutil_import.h"

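/*
 * Unlike assert(3C), verify() always evaluates its expression, so calls
 * with side effects (e.g. nvlist lookups) still execute when built with
 * NDEBUG.
 */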
#ifdef NDEBUG
#define	verify(EX)	((void)(EX))
#else
#define	verify(EX)	assert(EX)
#endif

/*PRINTFLIKE2*/
static void
zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	(void) vsnprintf(hdl->lpc_desc, sizeof (hdl->lpc_desc), fmt, ap);
	hdl->lpc_desc_active = B_TRUE;

	va_end(ap);
}

static void
zutil_verror(libpc_handle_t *hdl, const char *error, const char *fmt,
    va_list ap)
{
	char action[1024];

	(void) vsnprintf(action, sizeof (action), fmt, ap);

	if (hdl->lpc_desc_active)
		hdl->lpc_desc_active = B_FALSE;
	else
		hdl->lpc_desc[0] = '\0';

	if (hdl->lpc_printerr) {
		if (hdl->lpc_desc[0] != '\0')
			error = hdl->lpc_desc;

		(void) fprintf(stderr, "%s: %s\n", action, error);
	}
}

/*PRINTFLIKE3*/
static int
zutil_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	zutil_verror(hdl, error, fmt, ap);

	va_end(ap);

	return (-1);
}

static int
zutil_error(libpc_handle_t *hdl, const char *error, const char *msg)
{
	return (zutil_error_fmt(hdl, error, "%s", msg));
}

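/*
 * Report an out-of-memory condition.  This never returns; the int return
 * type only allows callers to use it in expression context.
 */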
static int
zutil_no_memory(libpc_handle_t *hdl)
{
	(void) zutil_error(hdl, EZFS_NOMEM, "internal error");
	exit(1);
}

void *
zutil_alloc(libpc_handle_t *hdl, size_t size)
{
	void *data;

	if ((data = calloc(1, size)) == NULL)
		(void) zutil_no_memory(hdl);

	return (data);
}

char *
zutil_strdup(libpc_handle_t *hdl, const char *str)
{
	char *ret;

	if ((ret = strdup(str)) == NULL)
		(void) zutil_no_memory(hdl);

	return (ret);
}

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
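
/*
 * These lists mirror the label tuple hierarchy described above:
 *
 *	pool_list_t
 *	    pools: pool_entry_t (one per pool guid)
 *	        pe_vdevs: vdev_entry_t (one per toplevel vdev guid)
 *	            ve_configs: config_entry_t (one per label txg)
 *	    names: name_entry_t (vdev guid -> device path mappings)
 */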

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
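	/*
	 * For example (hypothetical paths), if the label recorded
	 * "/dev/dsk/c1t0d0s0" and the name list offers "/dev/dsk/c2t0d0s0"
	 * and "/dev/dsk/c1t0d0s2", the first candidate shares the longer
	 * suffix ("t0d0s0") and wins, preserving the slice number.
	 */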
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = devid_str_from_path(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
			devid_str_free(devid);
			return (-1);
		}
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path,
    int order, int num_labels, nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or a level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}

		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;

		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.
	 * If so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = fnvlist_dup(config);
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Determine whether the pool with the given name and GUID is currently
 * active, returning the result in 'isactive'.
 */
static int
zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	ASSERT(hdl->lpc_ops->pco_pool_active != NULL);

	int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name,
	    guid, isactive);

	return (error);
}

static nvlist_t *
zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig)
{
	ASSERT(hdl->lpc_ops->pco_refresh_config != NULL);

	return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle,
	    tryconfig));
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
    nvlist_t *policy)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname = NULL;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool
		 * configuration from the first one we find, and then go
		 * through the rest and add them as necessary to the 'vdevs'
		 * member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zutil_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;

		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zutil_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(holey);
					goto nomem;
				}
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If any are found,
		 * create a faked up 'missing' vdev as a placeholder.  We
		 * cannot simply compress the child array, because the kernel
		 * performs certain checks to make sure the vdev IDs match
		 * their location in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (zutil_pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if (policy != NULL) {
			if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
			    policy) != 0)
				goto nomem;
		}

		if ((nvl = zutil_refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) zutil_no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
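
/*
 * With the standard VDEV_LABELS == 4 layout, labels 0 and 1 sit at the
 * front of the device (offsets 0 and sizeof (vdev_label_t)), while labels
 * 2 and 3 sit at the back, with label 3 ending at 'size'.
 */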

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.  The number of valid
 * labels found will be returned in num_labels when non-NULL.
 */
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
	struct stat64 statbuf;
	int l, count = 0;
	vdev_label_t *label;
	nvlist_t *expected_config = NULL;
	uint64_t expected_guid = 0, size;

	*config = NULL;

	if (num_labels != NULL)
		*num_labels = 0;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, txg;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(*config);
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		if (expected_guid) {
			if (expected_guid == guid)
				count++;

			nvlist_free(*config);
		} else {
			expected_config = *config;
			expected_guid = guid;
			count++;
		}
	}

	if (num_labels != NULL)
		*num_labels = count;

	free(label);
	*config = expected_config;

	return (0);
}
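
/*
 * A minimal caller sketch (hypothetical device path; error handling
 * elided):
 *
 *	nvlist_t *config;
 *	int num_labels;
 *	int fd = open("/dev/rdsk/c1t0d0s0", O_RDONLY);
 *	if (zpool_read_label(fd, &config, &num_labels) == 0 &&
 *	    config != NULL) {
 *		... inspect config ...
 *		nvlist_free(config);
 *	}
 *	(void) close(fd);
 */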

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? (size is in blksz-byte sectors) */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
}

static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
}

void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int error;
	int num_labels = 0;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	error = zpool_read_label(fd, &config, &num_labels);
	if (error != 0) {
		(void) close(fd);
		return;
	}

	if (num_labels == 0) {
		(void) close(fd);
		nvlist_free(config);
		return;
	}

	(void) close(fd);

	rn->rn_config = config;
	rn->rn_num_labels = num_labels;
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no directories
 * are given (iarg->paths is 0), then the default directory (/dev/dsk) is
 * searched.  poolname or guid (but not both) are provided by the caller when
 * trying to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = ZFS_DISK_ROOT;
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char rdsk[MAXPATHLEN];
		int dfd;
		boolean_t config_failed = B_FALSE;
		DIR *dirp;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zutil_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
		 */
		if (strcmp(path, ZFS_DISK_ROOTD) == 0)
			(void) strlcpy(rdsk, ZFS_RDISK_ROOTD, sizeof (rdsk));
		else
			(void) strlcpy(rdsk, path, sizeof (rdsk));

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			if (dfd >= 0)
				(void) close(dfd);
			zutil_error_aux(hdl, strerror(errno));
			(void) zutil_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
		/*
		 * This is not MT-safe, but we have no MT consumers of libzutil
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zutil_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
		/*
		 * Create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice cannot
		 * hold a zpool, which is benign.  Also choose double the
		 * number of processors; we hold a lot of locks in the
		 * kernel, so going beyond this doesn't buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL && !config_failed) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (matched) {
					/*
					 * use the non-raw path for the config
					 */
					(void) strlcpy(end, slice->rn_name,
					    pathleft);
					(void) add_config(hdl, &pools,
					    path, slice->rn_order,
					    slice->rn_num_labels, config);
				}
				nvlist_free(config);
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);

		if (config_failed)
			goto error;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		free(ne->ne_name);
		free(ne);
	}

	return (ret);
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, const char *cachefile,
    const char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zutil_error_aux(hdl, "%s", strerror(errno));
		(void) zutil_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zutil_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zutil_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zutil_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zutil_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through the cached configs and refresh the state of each pool.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) zutil_no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
		    cachefile) != 0) {
			(void) zutil_no_memory(hdl);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if ((dst = zutil_refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) zutil_no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

nvlist_t *
zpool_search_import(void *hdl, importargs_t *import,
    const pool_config_ops_t *pco)
{
	libpc_handle_t handle = { 0 };
	nvlist_t *pools = NULL;

	handle.lpc_lib_handle = hdl;
	handle.lpc_ops = pco;
	handle.lpc_printerr = B_TRUE;

	verify(import->poolname == NULL || import->guid == 0);

	if (import->cachefile != NULL)
		pools = zpool_find_import_cached(&handle, import->cachefile,
		    import->poolname, import->guid);
	else
		pools = zpool_find_import_impl(&handle, import);

	if ((pools == NULL || nvlist_empty(pools)) &&
	    handle.lpc_open_access_error && geteuid() != 0) {
		(void) zutil_error(&handle, EZFS_EACESS, dgettext(TEXT_DOMAIN,
		    "no pools found"));
	}

	return (pools);
}
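
/*
 * A minimal caller sketch (hypothetical ops vector and search list; error
 * handling elided):
 *
 *	importargs_t args = { 0 };
 *	args.path = search_dirs;
 *	args.paths = ndirs;
 *	nvlist_t *pools = zpool_search_import(lib_hdl, &args, &my_pco);
 *	... iterate with nvlist_next_nvpair(pools, elem), then
 *	nvlist_free(pools) ...
 */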

static boolean_t
pool_match(nvlist_t *cfg, char *tgt)
{
	uint64_t v, guid = strtoull(tgt, NULL, 0);
	char *s;

	if (guid != 0) {
		if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
			return (v == guid);
	} else {
		if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
			return (strcmp(s, tgt) == 0);
	}
	return (B_FALSE);
}

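/*
 * Find the pool matching 'target', which may be a pool name or GUID; any
 * '/'- or '@'-separated dataset suffix is ignored.  Returns 0 and sets
 * *configp on a unique match, ENOENT if nothing matches, and EINVAL if
 * more than one pool matches.
 */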
int
zpool_find_config(void *hdl, const char *target, nvlist_t **configp,
    importargs_t *args, const pool_config_ops_t *pco)
{
	nvlist_t *pools;
	nvlist_t *match = NULL;
	nvlist_t *config = NULL;
	char *sepp = NULL;
	int count = 0;
	char *targetdup = strdup(target);

	*configp = NULL;

	if ((sepp = strpbrk(targetdup, "/@")) != NULL) {
		*sepp = '\0';
	}

	pools = zpool_search_import(hdl, args, pco);

	if (pools != NULL) {
		nvpair_t *elem = NULL;
		while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
			VERIFY0(nvpair_value_nvlist(elem, &config));
			if (pool_match(config, targetdup)) {
				count++;
				if (match != NULL) {
					/* multiple matches found */
					continue;
				} else {
					match = config;
				}
			}
		}
	}

	if (count == 0) {
		free(targetdup);
		return (ENOENT);
	}

	if (count > 1) {
		free(targetdup);
		return (EINVAL);
	}

	*configp = match;
	free(targetdup);

	return (0);
}