libzfs_mount.c revision eb633035c80613ec93d62f90482837adaaf21a0a
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright (c) 2014, 2017 by Delphix. All rights reserved.
26 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
27 * Copyright 2017 Joyent, Inc.
28 * Copyright 2017 RackTop Systems.
29 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
30 */
31
32/*
33 * Routines to manage ZFS mounts.  We separate all the nasty routines that have
34 * to deal with the OS.  The following functions are the main entry points --
35 * they are used by mount and unmount and when changing a filesystem's
36 * mountpoint.
37 *
38 *	zfs_is_mounted()
39 *	zfs_mount()
40 *	zfs_unmount()
41 *	zfs_unmountall()
42 *
43 * This file also contains the functions used to manage sharing filesystems via
44 * NFS and SMB:
45 *
46 *	zfs_is_shared()
47 *	zfs_share()
48 *	zfs_unshare()
49 *
50 *	zfs_is_shared_nfs()
51 *	zfs_is_shared_smb()
52 *	zfs_share_proto()
53 *	zfs_shareall()
54 *	zfs_unshare_nfs()
55 *	zfs_unshare_smb()
56 *	zfs_unshareall_nfs()
57 *	zfs_unshareall_smb()
58 *	zfs_unshareall()
59 *	zfs_unshareall_bypath()
60 *
61 * The following functions are available for pool consumers, and will
62 * mount/unmount and share/unshare all datasets within the pool:
63 *
64 *	zpool_enable_datasets()
65 *	zpool_disable_datasets()
66 */
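/*
 * A minimal consumer-side sketch of the entry points above, assuming a
 * hypothetical dataset "tank/home" and eliding most error handling:
 *
 *	#include <libzfs.h>
 *
 *	int
 *	example_mount_cycle(void)
 *	{
 *		libzfs_handle_t *hdl;
 *		zfs_handle_t *zhp;
 *		char *where = NULL;
 *		int err = 0;
 *
 *		if ((hdl = libzfs_init()) == NULL)
 *			return (-1);
 *		if ((zhp = zfs_open(hdl, "tank/home",
 *		    ZFS_TYPE_FILESYSTEM)) == NULL) {
 *			libzfs_fini(hdl);
 *			return (-1);
 *		}
 *
 *		if (!zfs_is_mounted(zhp, &where))
 *			err = zfs_mount(zhp, NULL, 0);
 *		else
 *			free(where);	// zfs_is_mounted() strdup's the mountpoint
 *
 *		if (err == 0)
 *			err = zfs_unmount(zhp, NULL, 0);
 *
 *		zfs_close(zhp);
 *		libzfs_fini(hdl);
 *		return (err);
 *	}
 */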
67
68#include <dirent.h>
69#include <dlfcn.h>
70#include <errno.h>
71#include <fcntl.h>
72#include <libgen.h>
73#include <libintl.h>
74#include <stdio.h>
75#include <stdlib.h>
76#include <strings.h>
77#include <unistd.h>
78#include <zone.h>
79#include <sys/mntent.h>
80#include <sys/mount.h>
81#include <sys/stat.h>
82#include <sys/statvfs.h>
83#include <sys/dsl_crypt.h>
84
85#include <libzfs.h>
86
87#include "libzfs_impl.h"
88#include "libzfs_taskq.h"
89
90#include <libshare.h>
91#include <sys/systeminfo.h>
92#define	MAXISALEN	257	/* based on sysinfo(2) man page */
93
94static int mount_tq_nthr = 512;	/* taskq threads for multi-threaded mounting */
95
96static void zfs_mount_task(void *);
97static int zfs_share_proto(zfs_handle_t *, zfs_share_proto_t *);
98zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
99    zfs_share_proto_t);
100
101/*
102 * The share protocols table must be in the same order as the zfs_share_proto_t
103 * enum in libzfs_impl.h
104 */
105typedef struct {
106	zfs_prop_t p_prop;
107	char *p_name;
108	int p_share_err;
109	int p_unshare_err;
110} proto_table_t;
111
112proto_table_t proto_table[PROTO_END] = {
113	{ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
114	{ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
115};
116
117zfs_share_proto_t nfs_only[] = {
118	PROTO_NFS,
119	PROTO_END
120};
121
122zfs_share_proto_t smb_only[] = {
123	PROTO_SMB,
124	PROTO_END
125};
126zfs_share_proto_t share_all_proto[] = {
127	PROTO_NFS,
128	PROTO_SMB,
129	PROTO_END
130};
131
132/*
133 * Search the sharetab for the given mountpoint and protocol, returning
134 * a zfs_share_type_t value.
135 */
136static zfs_share_type_t
137is_shared(libzfs_handle_t *hdl, const char *mountpoint, zfs_share_proto_t proto)
138{
139	char buf[MAXPATHLEN], *tab;
140	char *ptr;
141
142	if (hdl->libzfs_sharetab == NULL)
143		return (SHARED_NOT_SHARED);
144
145	(void) fseek(hdl->libzfs_sharetab, 0, SEEK_SET);
146
147	while (fgets(buf, sizeof (buf), hdl->libzfs_sharetab) != NULL) {
148
149		/* the mountpoint is the first entry on each line */
150		if ((tab = strchr(buf, '\t')) == NULL)
151			continue;
152
153		*tab = '\0';
154		if (strcmp(buf, mountpoint) == 0) {
155			/*
156			 * the protocol field is the third field;
157			 * skip over the second field
158			 */
159			ptr = ++tab;
160			if ((tab = strchr(ptr, '\t')) == NULL)
161				continue;
162			ptr = ++tab;
163			if ((tab = strchr(ptr, '\t')) == NULL)
164				continue;
165			*tab = '\0';
166			if (strcmp(ptr,
167			    proto_table[proto].p_name) == 0) {
168				switch (proto) {
169				case PROTO_NFS:
170					return (SHARED_NFS);
171				case PROTO_SMB:
172					return (SHARED_SMB);
173				default:
174					return (0);
175				}
176			}
177		}
178	}
179
180	return (SHARED_NOT_SHARED);
181}
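/*
 * For reference, is_shared() walks tab-separated sharetab entries: the first
 * field is the shared path and the third field is the protocol name compared
 * against proto_table[].p_name.  A hypothetical line (with <TAB> marking the
 * separators) would look like:
 *
 *	/export/home<TAB>-<TAB>nfs<TAB>rw<TAB>
 *
 * so a PROTO_NFS query for the mountpoint "/export/home" returns SHARED_NFS.
 */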
182
183static boolean_t
184dir_is_empty_stat(const char *dirname)
185{
186	struct stat st;
187
188	/*
189	 * We only want to return false if the given path is a non-empty
190	 * directory; all other errors are handled elsewhere.
191	 */
192	if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
193		return (B_TRUE);
194	}
195
196	/*
197	 * An empty directory will still have two entries in it, one
198	 * entry for each of "." and "..".
199	 */
200	if (st.st_size > 2) {
201		return (B_FALSE);
202	}
203
204	return (B_TRUE);
205}
206
207static boolean_t
208dir_is_empty_readdir(const char *dirname)
209{
210	DIR *dirp;
211	struct dirent64 *dp;
212	int dirfd;
213
214	if ((dirfd = openat(AT_FDCWD, dirname,
215	    O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
216		return (B_TRUE);
217	}
218
219	if ((dirp = fdopendir(dirfd)) == NULL) {
220		(void) close(dirfd);
221		return (B_TRUE);
222	}
223
224	while ((dp = readdir64(dirp)) != NULL) {
225
226		if (strcmp(dp->d_name, ".") == 0 ||
227		    strcmp(dp->d_name, "..") == 0)
228			continue;
229
230		(void) closedir(dirp);
231		return (B_FALSE);
232	}
233
234	(void) closedir(dirp);
235	return (B_TRUE);
236}
237
238/*
239 * Returns true if the specified directory is empty.  If we can't open the
240 * directory at all, return true so that the mount can fail with a more
241 * informative error message.
242 */
243static boolean_t
244dir_is_empty(const char *dirname)
245{
246	struct statvfs64 st;
247
248	/*
249	 * If the statvfs call fails or the filesystem is not a ZFS
250	 * filesystem, fall back to the slow path which uses readdir.
251	 */
252	if ((statvfs64(dirname, &st) != 0) ||
253	    (strcmp(st.f_basetype, "zfs") != 0)) {
254		return (dir_is_empty_readdir(dirname));
255	}
256
257	/*
258	 * At this point, we know the provided path is on a ZFS
259	 * filesystem, so we can use stat instead of readdir to
260	 * determine if the directory is empty or not. We try to avoid
261	 * using readdir because that requires opening "dirname"; this
262	 * open file descriptor can potentially end up in a child
263	 * process if there's a concurrent fork, thus preventing the
264	 * zfs_mount() from otherwise succeeding (the open file
265	 * descriptor inherited by the child process will cause the
266	 * parent's mount to fail with EBUSY). The performance
267	 * implications of replacing the open, read, and close with a
268	 * single stat is nice; but is not the main motivation for the
269	 * added complexity.
270	 */
271	return (dir_is_empty_stat(dirname));
272}
273
274/*
275 * Checks to see if the mount is active.  If the filesystem is mounted, we fill
276 * in 'where' with the current mountpoint and return B_TRUE.  Otherwise, we
277 * return B_FALSE.
278 */
279boolean_t
280is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
281{
282	struct mnttab entry;
283
284	if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
285		return (B_FALSE);
286
287	if (where != NULL)
288		*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
289
290	return (B_TRUE);
291}
292
293boolean_t
294zfs_is_mounted(zfs_handle_t *zhp, char **where)
295{
296	return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
297}
298
299/*
300 * Returns true if the given dataset is mountable, false otherwise.  Returns the
301 * mountpoint in 'buf'.
302 */
303static boolean_t
304zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
305    zprop_source_t *source)
306{
307	char sourceloc[MAXNAMELEN];
308	zprop_source_t sourcetype;
309
310	if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type))
311		return (B_FALSE);
312
313	verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
314	    &sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
315
316	if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
317	    strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
318		return (B_FALSE);
319
320	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
321		return (B_FALSE);
322
323	if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
324	    getzoneid() == GLOBAL_ZONEID)
325		return (B_FALSE);
326
327	if (source)
328		*source = sourcetype;
329
330	return (B_TRUE);
331}
332
333/*
334 * Mount the given filesystem.
335 */
336int
337zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
338{
339	struct stat buf;
340	char mountpoint[ZFS_MAXPROPLEN];
341	char mntopts[MNT_LINE_MAX];
342	libzfs_handle_t *hdl = zhp->zfs_hdl;
343	uint64_t keystatus;
344	int rc;
345
346	if (options == NULL)
347		mntopts[0] = '\0';
348	else
349		(void) strlcpy(mntopts, options, sizeof (mntopts));
350
351	/*
352	 * If the pool is imported read-only then all mounts must be read-only
353	 */
354	if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
355		flags |= MS_RDONLY;
356
357	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
358		return (0);
359
360	/*
361	 * If the filesystem is encrypted, the key must be loaded in order to
362	 * mount. If the key isn't loaded, the MS_CRYPT flag decides whether
363	 * or not we attempt to load the keys. Note: we must call
364	 * zfs_refresh_properties() here since some callers of this function
365	 * (most notably zpool_enable_datasets()) may implicitly load our key
366	 * by loading the parent's key first.
367	 */
368	if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
369		zfs_refresh_properties(zhp);
370		keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
371
372		/*
373		 * If the key is unavailable and MS_CRYPT is set give the
374		 * user a chance to enter the key. Otherwise just fail
375		 * immediately.
376		 */
377		if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
378			if (flags & MS_CRYPT) {
379				rc = zfs_crypto_load_key(zhp, B_FALSE, NULL);
380				if (rc != 0)
381					return (rc);
382			} else {
383				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
384				    "encryption key not loaded"));
385				return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
386				    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
387				    mountpoint));
388			}
389		}
390
391	}
392
393	/* Create the directory if it doesn't already exist */
394	if (lstat(mountpoint, &buf) != 0) {
395		if (mkdirp(mountpoint, 0755) != 0) {
396			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
397			    "failed to create mountpoint"));
398			return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
399			    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
400			    mountpoint));
401		}
402	}
403
404	/*
405	 * Determine if the mountpoint directory is empty.  If it is not, refuse
406	 * to perform the mount.  We don't perform this check if MS_OVERLAY is
407	 * specified, which would defeat the point.  We also skip this check if
408	 * 'remount' is specified.
409	 */
410	if ((flags & MS_OVERLAY) == 0 &&
411	    strstr(mntopts, MNTOPT_REMOUNT) == NULL &&
412	    !dir_is_empty(mountpoint)) {
413		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
414		    "directory is not empty"));
415		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
416		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
417	}
418
419	/* perform the mount */
420	if (mount(zfs_get_name(zhp), mountpoint, MS_OPTIONSTR | flags,
421	    MNTTYPE_ZFS, NULL, 0, mntopts, sizeof (mntopts)) != 0) {
422		/*
423		 * Generic errors are nasty, but there are just way too many
424		 * from mount(), and they're well-understood.  We pick a few
425		 * common ones to improve upon.
426		 */
427		if (errno == EBUSY) {
428			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429			    "mountpoint or dataset is busy"));
430		} else if (errno == EPERM) {
431			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
432			    "Insufficient privileges"));
433		} else if (errno == ENOTSUP) {
434			char buf[256];
435			int spa_version;
436
437			VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
438			(void) snprintf(buf, sizeof (buf),
439			    dgettext(TEXT_DOMAIN, "Can't mount a version %lld "
440			    "file system on a version %d pool. Pool must be"
441			    " upgraded to mount this file system."),
442			    (u_longlong_t)zfs_prop_get_int(zhp,
443			    ZFS_PROP_VERSION), spa_version);
444			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, buf));
445		} else {
446			zfs_error_aux(hdl, strerror(errno));
447		}
448		return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
449		    dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
450		    zhp->zfs_name));
451	}
452
453	/* add the mounted entry into our cache */
454	libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint,
455	    mntopts);
456	return (0);
457}
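/*
 * A brief sketch of how a caller can drive the key handling described above
 * for an encrypted dataset: either load the key explicitly (which may prompt
 * the user) and mount normally, or pass MS_CRYPT and let zfs_mount() attempt
 * the key load itself.  Error handling is elided:
 *
 *	int
 *	example_mount_encrypted(zfs_handle_t *zhp, boolean_t load_key_first)
 *	{
 *		uint64_t keystatus;
 *
 *		keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
 *		if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE && load_key_first) {
 *			if (zfs_crypto_load_key(zhp, B_FALSE, NULL) != 0)
 *				return (-1);
 *			return (zfs_mount(zhp, NULL, 0));
 *		}
 *		return (zfs_mount(zhp, NULL, MS_CRYPT));
 *	}
 */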
458
459/*
460 * Unmount a single filesystem.
461 */
462static int
463unmount_one(libzfs_handle_t *hdl, const char *mountpoint, int flags)
464{
465	if (umount2(mountpoint, flags) != 0) {
466		zfs_error_aux(hdl, strerror(errno));
467		return (zfs_error_fmt(hdl, EZFS_UMOUNTFAILED,
468		    dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
469		    mountpoint));
470	}
471
472	return (0);
473}
474
475/*
476 * Unmount the given filesystem.
477 */
478int
479zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
480{
481	libzfs_handle_t *hdl = zhp->zfs_hdl;
482	struct mnttab entry;
483	char *mntpt = NULL;
484
485	/* check to see if we need to unmount the filesystem */
486	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
487	    libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
488		/*
489		 * If 'mountpoint' isn't NULL, it may have come from a call to
490		 * getmnt/getmntany. If it is NULL, it comes from
491		 * libzfs_mnttab_find(), whose entry can be freed later, so
492		 * we strdup it to be safe.
493		 */
494		if (mountpoint == NULL)
495			mntpt = zfs_strdup(hdl, entry.mnt_mountp);
496		else
497			mntpt = zfs_strdup(hdl, mountpoint);
498
499		/*
500		 * Unshare and unmount the filesystem
501		 */
502		if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0)
503			return (-1);
504
505		if (unmount_one(hdl, mntpt, flags) != 0) {
506			free(mntpt);
507			(void) zfs_shareall(zhp);
508			return (-1);
509		}
510		libzfs_mnttab_remove(hdl, zhp->zfs_name);
511		free(mntpt);
512	}
513
514	return (0);
515}
516
517/*
518 * Unmount this filesystem and any children inheriting the mountpoint property.
519 * To do this, just act like we're changing the mountpoint property, but don't
520 * remount the filesystems afterwards.
521 */
522int
523zfs_unmountall(zfs_handle_t *zhp, int flags)
524{
525	prop_changelist_t *clp;
526	int ret;
527
528	clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT, 0, flags);
529	if (clp == NULL)
530		return (-1);
531
532	ret = changelist_prefix(clp);
533	changelist_free(clp);
534
535	return (ret);
536}
537
538boolean_t
539zfs_is_shared(zfs_handle_t *zhp)
540{
541	zfs_share_type_t rc = 0;
542	zfs_share_proto_t *curr_proto;
543
544	if (ZFS_IS_VOLUME(zhp))
545		return (B_FALSE);
546
547	for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
548	    curr_proto++)
549		rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
550
551	return (rc ? B_TRUE : B_FALSE);
552}
553
554int
555zfs_share(zfs_handle_t *zhp)
556{
557	assert(!ZFS_IS_VOLUME(zhp));
558	return (zfs_share_proto(zhp, share_all_proto));
559}
560
561int
562zfs_unshare(zfs_handle_t *zhp)
563{
564	assert(!ZFS_IS_VOLUME(zhp));
565	return (zfs_unshareall(zhp));
566}
567
568/*
569 * Check to see if the filesystem is currently shared.
570 */
571zfs_share_type_t
572zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
573{
574	char *mountpoint;
575	zfs_share_type_t rc;
576
577	if (!zfs_is_mounted(zhp, &mountpoint))
578		return (SHARED_NOT_SHARED);
579
580	if ((rc = is_shared(zhp->zfs_hdl, mountpoint, proto))
581	    != SHARED_NOT_SHARED) {
582		if (where != NULL)
583			*where = mountpoint;
584		else
585			free(mountpoint);
586		return (rc);
587	} else {
588		free(mountpoint);
589		return (SHARED_NOT_SHARED);
590	}
591}
592
593boolean_t
594zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
595{
596	return (zfs_is_shared_proto(zhp, where,
597	    PROTO_NFS) != SHARED_NOT_SHARED);
598}
599
600boolean_t
601zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
602{
603	return (zfs_is_shared_proto(zhp, where,
604	    PROTO_SMB) != SHARED_NOT_SHARED);
605}
606
607/*
608 * Make sure things still work if libshare isn't installed, by using
609 * wrapper functions that check whether the function pointers
610 * initialized in _zfs_init_libshare() are actually present.
611 */
612
613static sa_handle_t (*_sa_init)(int);
614static sa_handle_t (*_sa_init_arg)(int, void *);
615static void (*_sa_fini)(sa_handle_t);
616static sa_share_t (*_sa_find_share)(sa_handle_t, char *);
617static int (*_sa_enable_share)(sa_share_t, char *);
618static int (*_sa_disable_share)(sa_share_t, char *);
619static char *(*_sa_errorstr)(int);
620static int (*_sa_parse_legacy_options)(sa_group_t, char *, char *);
621static boolean_t (*_sa_needs_refresh)(sa_handle_t *);
622static libzfs_handle_t *(*_sa_get_zfs_handle)(sa_handle_t);
623static int (*_sa_zfs_process_share)(sa_handle_t, sa_group_t, sa_share_t,
624    char *, char *, zprop_source_t, char *, char *, char *);
625static void (*_sa_update_sharetab_ts)(sa_handle_t);
626
627/*
628 * _zfs_init_libshare()
629 *
630 * Find the libshare.so.1 entry points that we use here and save the
631 * values to be used later. This is triggered by the runtime loader.
632 * Make sure the correct ISA version is loaded.
633 */
634
635#pragma init(_zfs_init_libshare)
636static void
637_zfs_init_libshare(void)
638{
639	void *libshare;
640	char path[MAXPATHLEN];
641	char isa[MAXISALEN];
642
643#if defined(_LP64)
644	if (sysinfo(SI_ARCHITECTURE_64, isa, MAXISALEN) == -1)
645		isa[0] = '\0';
646#else
647	isa[0] = '\0';
648#endif
649	(void) snprintf(path, MAXPATHLEN,
650	    "/usr/lib/%s/libshare.so.1", isa);
651
652	if ((libshare = dlopen(path, RTLD_LAZY | RTLD_GLOBAL)) != NULL) {
653		_sa_init = (sa_handle_t (*)(int))dlsym(libshare, "sa_init");
654		_sa_init_arg = (sa_handle_t (*)(int, void *))dlsym(libshare,
655		    "sa_init_arg");
656		_sa_fini = (void (*)(sa_handle_t))dlsym(libshare, "sa_fini");
657		_sa_find_share = (sa_share_t (*)(sa_handle_t, char *))
658		    dlsym(libshare, "sa_find_share");
659		_sa_enable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
660		    "sa_enable_share");
661		_sa_disable_share = (int (*)(sa_share_t, char *))dlsym(libshare,
662		    "sa_disable_share");
663		_sa_errorstr = (char *(*)(int))dlsym(libshare, "sa_errorstr");
664		_sa_parse_legacy_options = (int (*)(sa_group_t, char *, char *))
665		    dlsym(libshare, "sa_parse_legacy_options");
666		_sa_needs_refresh = (boolean_t (*)(sa_handle_t *))
667		    dlsym(libshare, "sa_needs_refresh");
668		_sa_get_zfs_handle = (libzfs_handle_t *(*)(sa_handle_t))
669		    dlsym(libshare, "sa_get_zfs_handle");
670		_sa_zfs_process_share = (int (*)(sa_handle_t, sa_group_t,
671		    sa_share_t, char *, char *, zprop_source_t, char *,
672		    char *, char *))dlsym(libshare, "sa_zfs_process_share");
673		_sa_update_sharetab_ts = (void (*)(sa_handle_t))
674		    dlsym(libshare, "sa_update_sharetab_ts");
675		if (_sa_init == NULL || _sa_init_arg == NULL ||
676		    _sa_fini == NULL || _sa_find_share == NULL ||
677		    _sa_enable_share == NULL || _sa_disable_share == NULL ||
678		    _sa_errorstr == NULL || _sa_parse_legacy_options == NULL ||
679		    _sa_needs_refresh == NULL || _sa_get_zfs_handle == NULL ||
680		    _sa_zfs_process_share == NULL ||
681		    _sa_update_sharetab_ts == NULL) {
682			_sa_init = NULL;
683			_sa_init_arg = NULL;
684			_sa_fini = NULL;
685			_sa_disable_share = NULL;
686			_sa_enable_share = NULL;
687			_sa_errorstr = NULL;
688			_sa_parse_legacy_options = NULL;
689			(void) dlclose(libshare);
690			_sa_needs_refresh = NULL;
691			_sa_get_zfs_handle = NULL;
692			_sa_zfs_process_share = NULL;
693			_sa_update_sharetab_ts = NULL;
694		}
695	}
696}
697
698/*
699 * zfs_init_libshare(zhandle, service)
700 *
701 * Initialize the libshare API if it hasn't already been initialized.
702 * It returns SA_OK (0) on success and an error code otherwise. The
703 * service value selects which part(s) of the API to initialize and is a
704 * direct map to the libshare sa_init(service) interface.
705 */
706static int
707zfs_init_libshare_impl(libzfs_handle_t *zhandle, int service, void *arg)
708{
709	/*
710	 * libshare is either not installed or we're in a branded zone. The
711	 * rest of the wrapper functions around the libshare calls already
712	 * handle NULL function pointers, but we don't want the callers of
713	 * zfs_init_libshare() to fail prematurely if libshare is not available.
714	 */
715	if (_sa_init == NULL)
716		return (SA_OK);
717
718	/*
719	 * Attempt to refresh libshare. This is necessary if there was a cache
720	 * miss for a new ZFS dataset that was just created, or if the state of
721	 * the sharetab file has changed since libshare was last initialized. To
722	 * make sure, we check timestamps to see whether a different process has
723	 * updated any of the configuration. If there was some non-ZFS
724	 * change, we need to re-initialize the internal cache.
725	 */
726	if (_sa_needs_refresh != NULL &&
727	    _sa_needs_refresh(zhandle->libzfs_sharehdl)) {
728		zfs_uninit_libshare(zhandle);
729		zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
730	}
731
732	if (zhandle && zhandle->libzfs_sharehdl == NULL)
733		zhandle->libzfs_sharehdl = _sa_init_arg(service, arg);
734
735	if (zhandle->libzfs_sharehdl == NULL)
736		return (SA_NO_MEMORY);
737
738	return (SA_OK);
739}
740int
741zfs_init_libshare(libzfs_handle_t *zhandle, int service)
742{
743	return (zfs_init_libshare_impl(zhandle, service, NULL));
744}
745
746int
747zfs_init_libshare_arg(libzfs_handle_t *zhandle, int service, void *arg)
748{
749	return (zfs_init_libshare_impl(zhandle, service, arg));
750}
751
752
753/*
754 * zfs_uninit_libshare(zhandle)
755 *
756 * Uninitialize the libshare API if it has been initialized. It is OK
757 * to call this multiple times.
758 */
759void
760zfs_uninit_libshare(libzfs_handle_t *zhandle)
761{
762	if (zhandle != NULL && zhandle->libzfs_sharehdl != NULL) {
763		if (_sa_fini != NULL)
764			_sa_fini(zhandle->libzfs_sharehdl);
765		zhandle->libzfs_sharehdl = NULL;
766	}
767}
768
769/*
770 * zfs_parse_options(options, proto)
771 *
772 * Call the legacy parse interface to get the protocol-specific
773 * options using the NULL arg to indicate that this is a "parse" only.
774 */
775int
776zfs_parse_options(char *options, zfs_share_proto_t proto)
777{
778	if (_sa_parse_legacy_options != NULL) {
779		return (_sa_parse_legacy_options(NULL, options,
780		    proto_table[proto].p_name));
781	}
782	return (SA_CONFIG_ERR);
783}
784
785/*
786 * zfs_sa_find_share(handle, path)
787 *
788 * Wrapper around sa_find_share to find a share path in the
789 * configuration.
790 */
791static sa_share_t
792zfs_sa_find_share(sa_handle_t handle, char *path)
793{
794	if (_sa_find_share != NULL)
795		return (_sa_find_share(handle, path));
796	return (NULL);
797}
798
799/*
800 * zfs_sa_enable_share(share, proto)
801 *
802 * Wrapper for sa_enable_share which enables a share for a specified
803 * protocol.
804 */
805static int
806zfs_sa_enable_share(sa_share_t share, char *proto)
807{
808	if (_sa_enable_share != NULL)
809		return (_sa_enable_share(share, proto));
810	return (SA_CONFIG_ERR);
811}
812
813/*
814 * zfs_sa_disable_share(share, proto)
815 *
816 * Wrapper for sa_disable_share which disables a share for a specified
817 * protocol.
818 */
819static int
820zfs_sa_disable_share(sa_share_t share, char *proto)
821{
822	if (_sa_disable_share != NULL)
823		return (_sa_disable_share(share, proto));
824	return (SA_CONFIG_ERR);
825}
826
827/*
828 * Share the given filesystem according to the options in the specified
829 * protocol-specific properties (sharenfs, sharesmb).  We rely
830 * on "libshare" to do the dirty work for us.
831 */
832static int
833zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
834{
835	char mountpoint[ZFS_MAXPROPLEN];
836	char shareopts[ZFS_MAXPROPLEN];
837	char sourcestr[ZFS_MAXPROPLEN];
838	libzfs_handle_t *hdl = zhp->zfs_hdl;
839	sa_share_t share;
840	zfs_share_proto_t *curr_proto;
841	zprop_source_t sourcetype;
842	int ret;
843
844	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL))
845		return (0);
846
847	for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
848		/*
849		 * Skip this protocol if there are no share options.
850		 */
851		if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
852		    shareopts, sizeof (shareopts), &sourcetype, sourcestr,
853		    ZFS_MAXPROPLEN, B_FALSE) != 0 ||
854		    strcmp(shareopts, "off") == 0)
855			continue;
856		ret = zfs_init_libshare_arg(hdl, SA_INIT_ONE_SHARE_FROM_HANDLE,
857		    zhp);
858		if (ret != SA_OK) {
859			(void) zfs_error_fmt(hdl, EZFS_SHARENFSFAILED,
860			    dgettext(TEXT_DOMAIN, "cannot share '%s': %s"),
861			    zfs_get_name(zhp), _sa_errorstr != NULL ?
862			    _sa_errorstr(ret) : "");
863			return (-1);
864		}
865
866		/*
867		 * If the 'zoned' property is set, then zfs_is_mountable()
868		 * will have already bailed out if we are in the global zone.
869		 * But local zones cannot be NFS servers, so we ignore it for
870		 * local zones as well.
871		 */
872		if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
873			continue;
874
875		share = zfs_sa_find_share(hdl->libzfs_sharehdl, mountpoint);
876		if (share == NULL) {
877			/*
878			 * This may be a new file system that was just
879			 * created, so it isn't in the internal cache
880			 * (second time through). Rather than
881			 * reloading the entire configuration, we can
882			 * assume ZFS has done the checking and it is
883			 * safe to add this to the internal
884			 * configuration.
885			 */
886			if (_sa_zfs_process_share(hdl->libzfs_sharehdl,
887			    NULL, NULL, mountpoint,
888			    proto_table[*curr_proto].p_name, sourcetype,
889			    shareopts, sourcestr, zhp->zfs_name) != SA_OK) {
890				(void) zfs_error_fmt(hdl,
891				    proto_table[*curr_proto].p_share_err,
892				    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
893				    zfs_get_name(zhp));
894				return (-1);
895			}
896			share = zfs_sa_find_share(hdl->libzfs_sharehdl,
897			    mountpoint);
898		}
899		if (share != NULL) {
900			int err;
901			err = zfs_sa_enable_share(share,
902			    proto_table[*curr_proto].p_name);
903			if (err != SA_OK) {
904				(void) zfs_error_fmt(hdl,
905				    proto_table[*curr_proto].p_share_err,
906				    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
907				    zfs_get_name(zhp));
908				return (-1);
909			}
910		} else {
911			(void) zfs_error_fmt(hdl,
912			    proto_table[*curr_proto].p_share_err,
913			    dgettext(TEXT_DOMAIN, "cannot share '%s'"),
914			    zfs_get_name(zhp));
915			return (-1);
916		}
917
918	}
919	return (0);
920}
921
922
923int
924zfs_share_nfs(zfs_handle_t *zhp)
925{
926	return (zfs_share_proto(zhp, nfs_only));
927}
928
929int
930zfs_share_smb(zfs_handle_t *zhp)
931{
932	return (zfs_share_proto(zhp, smb_only));
933}
934
935int
936zfs_shareall(zfs_handle_t *zhp)
937{
938	return (zfs_share_proto(zhp, share_all_proto));
939}
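/*
 * A small usage sketch of the sharing entry points, assuming 'zhp' refers to
 * a mounted filesystem with a sharenfs or sharesmb property set; error
 * handling is elided:
 *
 *	void
 *	example_share_cycle(zfs_handle_t *zhp)
 *	{
 *		if (!zfs_is_shared(zhp))
 *			(void) zfs_share(zhp);	// share over every configured protocol
 *
 *		// ... serve clients ...
 *
 *		(void) zfs_unshare(zhp);	// tear the shares back down
 *	}
 */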
940
941/*
942 * Unshare a filesystem by mountpoint.
943 */
944static int
945unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
946    zfs_share_proto_t proto)
947{
948	sa_share_t share;
949	int err;
950	char *mntpt;
951
952	/*
953	 * The mountpoint could get trashed if libshare calls getmntany(),
954	 * which it does during API initialization, so strdup the
955	 * value.
956	 */
957	mntpt = zfs_strdup(hdl, mountpoint);
958
959	/*
960	 * Make sure libshare is initialized; initialize everything because we
961	 * don't know what other unsharing may happen later. Functions up the
962	 * stack may instead initialize only a subset of shares once the
963	 * set is known.
964	 */
965	if ((err = zfs_init_libshare_arg(hdl, SA_INIT_ONE_SHARE_FROM_NAME,
966	    (void *)name)) != SA_OK) {
967		free(mntpt);	/* don't need the copy anymore */
968		return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
969		    dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
970		    name, _sa_errorstr(err)));
971	}
972
973	share = zfs_sa_find_share(hdl->libzfs_sharehdl, mntpt);
974	free(mntpt);	/* don't need the copy anymore */
975
976	if (share != NULL) {
977		err = zfs_sa_disable_share(share, proto_table[proto].p_name);
978		if (err != SA_OK) {
979			return (zfs_error_fmt(hdl,
980			    proto_table[proto].p_unshare_err,
981			    dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
982			    name, _sa_errorstr(err)));
983		}
984	} else {
985		return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
986		    dgettext(TEXT_DOMAIN, "cannot unshare '%s': not found"),
987		    name));
988	}
989	return (0);
990}
991
992/*
993 * Unshare the given filesystem.
994 */
995int
996zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
997    zfs_share_proto_t *proto)
998{
999	libzfs_handle_t *hdl = zhp->zfs_hdl;
1000	struct mnttab entry;
1001	char *mntpt = NULL;
1002
1003	/* check to see if we need to unshare the filesystem */
1004	rewind(zhp->zfs_hdl->libzfs_mnttab);
1005	if (mountpoint != NULL)
1006		mountpoint = mntpt = zfs_strdup(hdl, mountpoint);
1007
1008	if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
1009	    libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
1010		zfs_share_proto_t *curr_proto;
1011
1012		if (mountpoint == NULL)
1013			mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
1014
1015		for (curr_proto = proto; *curr_proto != PROTO_END;
1016		    curr_proto++) {
1017
1018			if (is_shared(hdl, mntpt, *curr_proto) &&
1019			    unshare_one(hdl, zhp->zfs_name,
1020			    mntpt, *curr_proto) != 0) {
1021				if (mntpt != NULL)
1022					free(mntpt);
1023				return (-1);
1024			}
1025		}
1026	}
1027	if (mntpt != NULL)
1028		free(mntpt);
1029
1030	return (0);
1031}
1032
1033int
1034zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
1035{
1036	return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
1037}
1038
1039int
1040zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
1041{
1042	return (zfs_unshare_proto(zhp, mountpoint, smb_only));
1043}
1044
1045/*
1046 * Same as zfs_unmountall(), but for NFS and SMB unshares.
1047 */
1048int
1049zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
1050{
1051	prop_changelist_t *clp;
1052	int ret;
1053
1054	clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
1055	if (clp == NULL)
1056		return (-1);
1057
1058	ret = changelist_unshare(clp, proto);
1059	changelist_free(clp);
1060
1061	return (ret);
1062}
1063
1064int
1065zfs_unshareall_nfs(zfs_handle_t *zhp)
1066{
1067	return (zfs_unshareall_proto(zhp, nfs_only));
1068}
1069
1070int
1071zfs_unshareall_smb(zfs_handle_t *zhp)
1072{
1073	return (zfs_unshareall_proto(zhp, smb_only));
1074}
1075
1076int
1077zfs_unshareall(zfs_handle_t *zhp)
1078{
1079	return (zfs_unshareall_proto(zhp, share_all_proto));
1080}
1081
1082int
1083zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
1084{
1085	return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
1086}
1087
1088/*
1089 * Remove the mountpoint associated with the current dataset, if necessary.
1090 * We only remove the underlying directory if:
1091 *
1092 *	- The mountpoint is not 'none' or 'legacy'
1093 *	- The mountpoint directory is empty
1094 *	- The mountpoint is the default or inherited
1095 *	- The 'zoned' property is set, or we're in a local zone
1096 *
1097 * Any other directories we leave alone.
1098 */
1099void
1100remove_mountpoint(zfs_handle_t *zhp)
1101{
1102	char mountpoint[ZFS_MAXPROPLEN];
1103	zprop_source_t source;
1104
1105	if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
1106	    &source))
1107		return;
1108
1109	if (source == ZPROP_SRC_DEFAULT ||
1110	    source == ZPROP_SRC_INHERITED) {
1111		/*
1112		 * Try to remove the directory, silently ignoring any errors.
1113		 * The filesystem may have since been removed or moved around,
1114		 * and this error isn't really useful to the administrator in
1115		 * any way.
1116		 */
1117		(void) rmdir(mountpoint);
1118	}
1119}
1120
1121/*
1122 * Add the given zfs handle to the cb_handles array, dynamically reallocating
1123 * the array if it is out of space.
1124 */
1125void
1126libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
1127{
1128	if (cbp->cb_alloc == cbp->cb_used) {
1129		size_t newsz;
1130		zfs_handle_t **newhandles;
1131
1132		newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
1133		newhandles = zfs_realloc(zhp->zfs_hdl,
1134		    cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
1135		    newsz * sizeof (zfs_handle_t *));
1136		cbp->cb_handles = newhandles;
1137		cbp->cb_alloc = newsz;
1138	}
1139	cbp->cb_handles[cbp->cb_used++] = zhp;
1140}
1141
1142/*
1143 * Recursive helper function used during file system enumeration
1144 */
1145static int
1146zfs_iter_cb(zfs_handle_t *zhp, void *data)
1147{
1148	get_all_cb_t *cbp = data;
1149
1150	if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
1151		zfs_close(zhp);
1152		return (0);
1153	}
1154
1155	if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
1156		zfs_close(zhp);
1157		return (0);
1158	}
1159
1160	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1161	    ZFS_KEYSTATUS_UNAVAILABLE) {
1162		zfs_close(zhp);
1163		return (0);
1164	}
1165
1166	/*
1167	 * If this filesystem is inconsistent and has a receive resume
1168	 * token, we can not mount it.
1169	 */
1170	if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
1171	    zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
1172	    NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
1173		zfs_close(zhp);
1174		return (0);
1175	}
1176
1177	libzfs_add_handle(cbp, zhp);
1178	if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
1179		zfs_close(zhp);
1180		return (-1);
1181	}
1182	return (0);
1183}
1184
1185/*
1186 * Sort comparator that compares two mountpoint paths. We sort these paths so
1187 * that subdirectories immediately follow their parents. This means that we
1188 * effectively treat the '/' character as the lowest value non-nul char.
1189 * Since filesystems from non-global zones can have the same mountpoint
1190 * as other filesystems, the comparator sorts global zone filesystems to
1191 * the top of the list. This means that the global zone will traverse the
1192 * filesystem list in the correct order and can stop when it sees the
1193 * first zoned filesystem. In a non-global zone, only the delegated
1194 * filesystems are seen.
1195 *
1196 * An example sorted list using this comparator would look like:
1197 *
1198 * /foo
1199 * /foo/bar
1200 * /foo/bar/baz
1201 * /foo/baz
1202 * /foo.bar
1203 * /foo (NGZ1)
1204 * /foo (NGZ2)
1205 *
1206 * The mounting code depends on this ordering to deterministically iterate
1207 * over filesystems in order to spawn parallel mount tasks.
1208 */
1209static int
1210mountpoint_cmp(const void *arga, const void *argb)
1211{
1212	zfs_handle_t *const *zap = arga;
1213	zfs_handle_t *za = *zap;
1214	zfs_handle_t *const *zbp = argb;
1215	zfs_handle_t *zb = *zbp;
1216	char mounta[MAXPATHLEN];
1217	char mountb[MAXPATHLEN];
1218	const char *a = mounta;
1219	const char *b = mountb;
1220	boolean_t gota, gotb;
1221	uint64_t zoneda, zonedb;
1222
1223	zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
1224	zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
1225	if (zoneda && !zonedb)
1226		return (1);
1227	if (!zoneda && zonedb)
1228		return (-1);
1229
1230	gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
1231	if (gota) {
1232		verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
1233		    sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
1234	}
1235	gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
1236	if (gotb) {
1237		verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
1238		    sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
1239	}
1240
1241	if (gota && gotb) {
1242		while (*a != '\0' && (*a == *b)) {
1243			a++;
1244			b++;
1245		}
1246		if (*a == *b)
1247			return (0);
1248		if (*a == '\0')
1249			return (-1);
1250		if (*b == '\0')
1251			return (1);
1252		if (*a == '/')
1253			return (-1);
1254		if (*b == '/')
1255			return (1);
1256		return (*a < *b ? -1 : *a > *b);
1257	}
1258
1259	if (gota)
1260		return (-1);
1261	if (gotb)
1262		return (1);
1263
1264	/*
1265	 * If neither filesystem has a mountpoint, revert to sorting by
1266	 * dataset name.
1267	 */
1268	return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
1269}
1270
1271/*
1272 * Return true if path2 is a descendant of path1.
1273 */
1274static boolean_t
1275libzfs_path_contains(const char *path1, const char *path2)
1276{
1277	return (strstr(path2, path1) == path2 && path2[strlen(path1)] == '/');
1278}
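/*
 * For example, with the check above:
 *
 *	libzfs_path_contains("/foo", "/foo/bar")	-> B_TRUE
 *	libzfs_path_contains("/foo", "/foobar")		-> B_FALSE (no '/' boundary)
 *	libzfs_path_contains("/foo", "/foo")		-> B_FALSE (equal, not a descendant)
 */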
1279
1280/*
1281 * Given a mountpoint specified by idx in the handles array, find the first
1282 * non-descendant of that mountpoint and return its index. Descendant paths
1283 * start with the parent's path. This function relies on the ordering
1284 * enforced by mountpoint_cmp().
1285 */
1286static int
1287non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
1288{
1289	char parent[ZFS_MAXPROPLEN];
1290	char child[ZFS_MAXPROPLEN];
1291	int i;
1292
1293	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
1294	    sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
1295
1296	for (i = idx + 1; i < num_handles; i++) {
1297		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
1298		    sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1299		if (!libzfs_path_contains(parent, child))
1300			break;
1301	}
1302	return (i);
1303}
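/*
 * A worked example with a hypothetical, already-sorted set of mountpoints:
 *
 *	idx 0: /foo
 *	idx 1: /foo/bar
 *	idx 2: /foo/bar/baz
 *	idx 3: /goo
 *
 * non_descendant_idx(handles, 4, 0) returns 3, since /goo is the first entry
 * not mounted underneath /foo, and non_descendant_idx(handles, 4, 1) also
 * returns 3, since /foo/bar/baz is a descendant of /foo/bar.
 */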
1304
1305typedef struct mnt_param {
1306	libzfs_handle_t	*mnt_hdl;
1307	zfs_taskq_t	*mnt_tq;
1308	zfs_handle_t	**mnt_zhps; /* filesystems to mount */
1309	size_t		mnt_num_handles;
1310	int		mnt_idx;	/* Index of selected entry to mount */
1311	zfs_iter_f	mnt_func;
1312	void		*mnt_data;
1313} mnt_param_t;
1314
1315/*
1316 * Allocate and populate the parameter struct for mount function, and
1317 * schedule mounting of the entry selected by idx.
1318 */
1319static void
1320zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
1321    size_t num_handles, int idx, zfs_iter_f func, void *data, zfs_taskq_t *tq)
1322{
1323	mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
1324
1325	mnt_param->mnt_hdl = hdl;
1326	mnt_param->mnt_tq = tq;
1327	mnt_param->mnt_zhps = handles;
1328	mnt_param->mnt_num_handles = num_handles;
1329	mnt_param->mnt_idx = idx;
1330	mnt_param->mnt_func = func;
1331	mnt_param->mnt_data = data;
1332
1333	(void) zfs_taskq_dispatch(tq, zfs_mount_task, (void*)mnt_param,
1334	    ZFS_TQ_SLEEP);
1335}
1336
1337/*
1338 * This is the structure used to keep state of mounting or sharing operations
1339 * during a call to zpool_enable_datasets().
1340 */
1341typedef struct mount_state {
1342	/*
1343	 * ms_mntstatus is set to -1 if any mount fails. While multiple threads
1344	 * could update this variable concurrently, no synchronization is
1345	 * needed as it's only ever set to -1.
1346	 */
1347	int		ms_mntstatus;
1348	int		ms_mntflags;
1349	const char	*ms_mntopts;
1350} mount_state_t;
1351
1352static int
1353zfs_mount_one(zfs_handle_t *zhp, void *arg)
1354{
1355	mount_state_t *ms = arg;
1356	int ret = 0;
1357
1358	if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
1359	    ZFS_KEYSTATUS_UNAVAILABLE)
1360		return (0);
1361
1362	if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
1363		ret = ms->ms_mntstatus = -1;
1364	return (ret);
1365}
1366
1367static int
1368zfs_share_one(zfs_handle_t *zhp, void *arg)
1369{
1370	mount_state_t *ms = arg;
1371	int ret = 0;
1372
1373	if (zfs_share(zhp) != 0)
1374		ret = ms->ms_mntstatus = -1;
1375	return (ret);
1376}
1377
1378/*
1379 * Task queue function to mount one file system. On completion, it finds and
1380 * schedules its children to be mounted. This depends on the sorting done in
1381 * zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
1382 * each descending from the previous) will have no parallelism since we always
1383 * have to wait for the parent to finish mounting before we can schedule
1384 * its children.
1385 */
1386static void
1387zfs_mount_task(void *arg)
1388{
1389	mnt_param_t *mp = arg;
1390	int idx = mp->mnt_idx;
1391	zfs_handle_t **handles = mp->mnt_zhps;
1392	size_t num_handles = mp->mnt_num_handles;
1393	char mountpoint[ZFS_MAXPROPLEN];
1394
1395	verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
1396	    sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
1397
1398	if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
1399		return;
1400
1401	/*
1402	 * We dispatch tasks to mount filesystems with mountpoints underneath
1403	 * this one. We do this by dispatching the next filesystem with a
1404	 * descendant mountpoint of the one we just mounted, then skip all of
1405	 * its descendants, dispatch the next descendant mountpoint, and so on.
1406	 * The non_descendant_idx() function skips over filesystems that are
1407	 * descendants of the filesystem we just dispatched.
1408	 */
1409	for (int i = idx + 1; i < num_handles;
1410	    i = non_descendant_idx(handles, num_handles, i)) {
1411		char child[ZFS_MAXPROPLEN];
1412		verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
1413		    child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
1414
1415		if (!libzfs_path_contains(mountpoint, child))
1416			break; /* not a descendant, return */
1417		zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
1418		    mp->mnt_func, mp->mnt_data, mp->mnt_tq);
1419	}
1420	free(mp);
1421}
1422
1423/*
1424 * Issue the func callback for each ZFS handle contained in the handles
1425 * array. This function is used to mount all datasets, and so it
1426 * guarantees that callbacks for parent mountpoints are issued before their
1427 * children. As such, before issuing any callbacks, we first sort the array
1428 * of handles by mountpoint.
1429 *
1430 * Callbacks are issued in one of two ways:
1431 *
1432 * 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
1433 *    environment variable is set, then we issue callbacks sequentially.
1434 *
1435 * 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
1436 *    environment variable is not set, then we use a taskq to dispatch threads
1437 *    to mount filesystems in parallel. This function dispatches tasks to mount
1438 *    the filesystems at the top-level mountpoints, and these tasks in turn
1439 *    are responsible for recursively mounting filesystems in their child
1440 *    mountpoints.
1441 */
1442void
1443zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
1444    size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
1445{
1446	zoneid_t zoneid = getzoneid();
1447
1448	/*
1449	 * The ZFS_SERIAL_MOUNT environment variable is an undocumented
1450 * variable that can be used as a convenience to do an a/b comparison
1451	 * of serial vs. parallel mounting.
1452	 */
1453	boolean_t serial_mount = !parallel ||
1454	    (getenv("ZFS_SERIAL_MOUNT") != NULL);
1455
1456	/*
1457	 * Sort the datasets by mountpoint. See mountpoint_cmp for details
1458	 * of how these are sorted.
1459	 */
1460	qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
1461
1462	if (serial_mount) {
1463		for (int i = 0; i < num_handles; i++) {
1464			func(handles[i], data);
1465		}
1466		return;
1467	}
1468
1469	/*
1470	 * Issue the callback function for each dataset using a parallel
1471	 * algorithm that uses a taskq to manage threads.
1472	 */
1473	zfs_taskq_t *tq = zfs_taskq_create("mount_taskq", mount_tq_nthr, 0,
1474	    mount_tq_nthr, mount_tq_nthr, ZFS_TASKQ_PREPOPULATE);
1475
1476	/*
1477	 * There may be multiple "top level" mountpoints outside of the pool's
1478	 * root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
1479	 * these.
1480	 */
1481	for (int i = 0; i < num_handles;
1482	    i = non_descendant_idx(handles, num_handles, i)) {
1483		/*
1484		 * Since the mountpoints have been sorted so that the zoned
1485		 * filesystems are at the end, a zoned filesystem seen from
1486		 * the global zone means that we're done.
1487		 */
1488		if (zoneid == GLOBAL_ZONEID &&
1489		    zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
1490			break;
1491		zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
1492		    tq);
1493	}
1494
1495	zfs_taskq_wait(tq); /* wait for all scheduled mounts to complete */
1496	zfs_taskq_destroy(tq);
1497}
1498
1499/*
1500 * Mount and share all datasets within the given pool.  This assumes that no
1501 * datasets within the pool are currently mounted.
1502 */
1503#pragma weak zpool_mount_datasets = zpool_enable_datasets
1504int
1505zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
1506{
1507	get_all_cb_t cb = { 0 };
1508	mount_state_t ms = { 0 };
1509	zfs_handle_t *zfsp;
1510	sa_init_selective_arg_t sharearg;
1511	int ret = 0;
1512
1513	if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
1514	    ZFS_TYPE_DATASET)) == NULL)
1515		goto out;
1516
1517
1518	/*
1519	 * Gather all non-snapshot datasets within the pool. Start by adding
1520	 * the root filesystem for this pool to the list, and then iterate
1521	 * over all child filesystems.
1522	 */
1523	libzfs_add_handle(&cb, zfsp);
1524	if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
1525		goto out;
1526
1527	ms.ms_mntopts = mntopts;
1528	ms.ms_mntflags = flags;
1529	zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1530	    zfs_mount_one, &ms, B_TRUE);
1531	if (ms.ms_mntstatus != 0)
1532		ret = ms.ms_mntstatus;
1533
1534	/*
1535	 * Share all filesystems that need to be shared. This needs to be
1536	 * a separate pass because libshare is not mt-safe, and so we need
1537	 * to share serially.
1538	 */
1539	sharearg.zhandle_arr = cb.cb_handles;
1540	sharearg.zhandle_len = cb.cb_used;
1541	if ((ret = zfs_init_libshare_arg(zhp->zpool_hdl,
1542	    SA_INIT_SHARE_API_SELECTIVE, &sharearg)) != 0)
1543		goto out;
1544
1545	ms.ms_mntstatus = 0;
1546	zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
1547	    zfs_share_one, &ms, B_FALSE);
1548	if (ms.ms_mntstatus != 0)
1549		ret = ms.ms_mntstatus;
1550
1551out:
1552	for (int i = 0; i < cb.cb_used; i++)
1553		zfs_close(cb.cb_handles[i]);
1554	free(cb.cb_handles);
1555
1556	return (ret);
1557}
1558
1559static int
1560mountpoint_compare(const void *a, const void *b)
1561{
1562	const char *mounta = *((char **)a);
1563	const char *mountb = *((char **)b);
1564
1565	return (strcmp(mountb, mounta));
1566}
1567
1568/* alias for 2002/240 */
1569#pragma weak zpool_unmount_datasets = zpool_disable_datasets
1570/*
1571 * Unshare and unmount all datasets within the given pool.  We don't want to
1572 * rely on traversing the DSL to discover the filesystems within the pool,
1573 * because this may be expensive (if not all of them are mounted), and can fail
1574 * arbitrarily (on I/O error, for example).  Instead, we walk /etc/mnttab and
1575 * gather all the filesystems that are currently mounted.
1576 */
1577int
1578zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
1579{
1580	int used, alloc;
1581	struct mnttab entry;
1582	size_t namelen;
1583	char **mountpoints = NULL;
1584	zfs_handle_t **datasets = NULL;
1585	libzfs_handle_t *hdl = zhp->zpool_hdl;
1586	int i;
1587	int ret = -1;
1588	int flags = (force ? MS_FORCE : 0);
1589	sa_init_selective_arg_t sharearg;
1590
1591	namelen = strlen(zhp->zpool_name);
1592
1593	rewind(hdl->libzfs_mnttab);
1594	used = alloc = 0;
1595	while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
1596		/*
1597		 * Ignore non-ZFS entries.
1598		 */
1599		if (entry.mnt_fstype == NULL ||
1600		    strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
1601			continue;
1602
1603		/*
1604		 * Ignore filesystems not within this pool.
1605		 */
1606		if (entry.mnt_mountp == NULL ||
1607		    strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
1608		    (entry.mnt_special[namelen] != '/' &&
1609		    entry.mnt_special[namelen] != '\0'))
1610			continue;
1611
1612		/*
1613		 * At this point we've found a filesystem within our pool.  Add
1614		 * it to our growing list.
1615		 */
1616		if (used == alloc) {
1617			if (alloc == 0) {
1618				if ((mountpoints = zfs_alloc(hdl,
1619				    8 * sizeof (void *))) == NULL)
1620					goto out;
1621
1622				if ((datasets = zfs_alloc(hdl,
1623				    8 * sizeof (void *))) == NULL)
1624					goto out;
1625
1626				alloc = 8;
1627			} else {
1628				void *ptr;
1629
1630				if ((ptr = zfs_realloc(hdl, mountpoints,
1631				    alloc * sizeof (void *),
1632				    alloc * 2 * sizeof (void *))) == NULL)
1633					goto out;
1634				mountpoints = ptr;
1635
1636				if ((ptr = zfs_realloc(hdl, datasets,
1637				    alloc * sizeof (void *),
1638				    alloc * 2 * sizeof (void *))) == NULL)
1639					goto out;
1640				datasets = ptr;
1641
1642				alloc *= 2;
1643			}
1644		}
1645
1646		if ((mountpoints[used] = zfs_strdup(hdl,
1647		    entry.mnt_mountp)) == NULL)
1648			goto out;
1649
1650		/*
1651		 * This is allowed to fail, in case there is some I/O error.  It
1652		 * is only used to determine if we need to remove the underlying
1653		 * mountpoint, so failure is not fatal.
1654		 */
1655		datasets[used] = make_dataset_handle(hdl, entry.mnt_special);
1656
1657		used++;
1658	}
1659
1660	/*
1661	 * At this point, we have the entire list of filesystems, so sort it by
1662	 * mountpoint.
1663	 */
1664	sharearg.zhandle_arr = datasets;
1665	sharearg.zhandle_len = used;
1666	ret = zfs_init_libshare_arg(hdl, SA_INIT_SHARE_API_SELECTIVE,
1667	    &sharearg);
1668	if (ret != 0)
1669		goto out;
1670	qsort(mountpoints, used, sizeof (char *), mountpoint_compare);
1671
1672	/*
1673	 * Walk through and first unshare everything.
1674	 */
1675	for (i = 0; i < used; i++) {
1676		zfs_share_proto_t *curr_proto;
1677		for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
1678		    curr_proto++) {
1679			if (is_shared(hdl, mountpoints[i], *curr_proto) &&
1680			    unshare_one(hdl, mountpoints[i],
1681			    mountpoints[i], *curr_proto) != 0)
1682				goto out;
1683		}
1684	}
1685
1686	/*
1687	 * Now unmount everything, removing the underlying directories as
1688	 * appropriate.
1689	 */
1690	for (i = 0; i < used; i++) {
1691		if (unmount_one(hdl, mountpoints[i], flags) != 0)
1692			goto out;
1693	}
1694
1695	for (i = 0; i < used; i++) {
1696		if (datasets[i])
1697			remove_mountpoint(datasets[i]);
1698	}
1699
1700	ret = 0;
1701out:
1702	for (i = 0; i < used; i++) {
1703		if (datasets[i])
1704			zfs_close(datasets[i]);
1705		free(mountpoints[i]);
1706	}
1707	free(datasets);
1708	free(mountpoints);
1709
1710	return (ret);
1711}
1712
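/*
 * A compact sketch of the pool-level consumers above, assuming a hypothetical
 * imported pool named "tank"; error handling is elided:
 *
 *	int
 *	example_pool_mounts(libzfs_handle_t *hdl)
 *	{
 *		zpool_handle_t *zph;
 *		int err;
 *
 *		if ((zph = zpool_open(hdl, "tank")) == NULL)
 *			return (-1);
 *
 *		// mount and share every mountable dataset in the pool
 *		err = zpool_enable_datasets(zph, NULL, 0);
 *
 *		// later: unshare and unmount everything (B_TRUE would force)
 *		if (err == 0)
 *			err = zpool_disable_datasets(zph, B_FALSE);
 *
 *		zpool_close(zph);
 *		return (err);
 *	}
 */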