1b9238976Sth /*
2b9238976Sth  * CDDL HEADER START
3b9238976Sth  *
4b9238976Sth  * The contents of this file are subject to the terms of the
5b9238976Sth  * Common Development and Distribution License (the "License").
6b9238976Sth  * You may not use this file except in compliance with the License.
7b9238976Sth  *
8b9238976Sth  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9b9238976Sth  * or http://www.opensolaris.org/os/licensing.
10b9238976Sth  * See the License for the specific language governing permissions
11b9238976Sth  * and limitations under the License.
12b9238976Sth  *
13b9238976Sth  * When distributing Covered Code, include this CDDL HEADER in each
14b9238976Sth  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15b9238976Sth  * If applicable, add the following below this CDDL HEADER, with the
16b9238976Sth  * fields enclosed by brackets "[]" replaced with your own identifying
17b9238976Sth  * information: Portions Copyright [yyyy] [name of copyright owner]
18b9238976Sth  *
19b9238976Sth  * CDDL HEADER END
20b9238976Sth  */
21b9238976Sth 
22b9238976Sth /*
23546a3997SThomas Haynes  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24b9238976Sth  * Use is subject to license terms.
25b9238976Sth  */
26b9238976Sth 
27b9238976Sth /*
28b9238976Sth  * Support for ephemeral mounts, e.g. mirror-mounts. These mounts are
29b9238976Sth  * triggered from a "stub" rnode via a special set of vnodeops.
30b9238976Sth  */
31b9238976Sth 
32b9238976Sth #include <sys/param.h>
33b9238976Sth #include <sys/types.h>
34b9238976Sth #include <sys/systm.h>
35b9238976Sth #include <sys/cred.h>
36b9238976Sth #include <sys/time.h>
37b9238976Sth #include <sys/vnode.h>
38b9238976Sth #include <sys/vfs.h>
39b9238976Sth #include <sys/vfs_opreg.h>
40b9238976Sth #include <sys/file.h>
41b9238976Sth #include <sys/filio.h>
42b9238976Sth #include <sys/uio.h>
43b9238976Sth #include <sys/buf.h>
44b9238976Sth #include <sys/mman.h>
45b9238976Sth #include <sys/pathname.h>
46b9238976Sth #include <sys/dirent.h>
47b9238976Sth #include <sys/debug.h>
48b9238976Sth #include <sys/vmsystm.h>
49b9238976Sth #include <sys/fcntl.h>
50b9238976Sth #include <sys/flock.h>
51b9238976Sth #include <sys/swap.h>
52b9238976Sth #include <sys/errno.h>
53b9238976Sth #include <sys/strsubr.h>
54b9238976Sth #include <sys/sysmacros.h>
55b9238976Sth #include <sys/kmem.h>
56b9238976Sth #include <sys/mount.h>
57b9238976Sth #include <sys/cmn_err.h>
58b9238976Sth #include <sys/pathconf.h>
59b9238976Sth #include <sys/utsname.h>
60b9238976Sth #include <sys/dnlc.h>
61b9238976Sth #include <sys/acl.h>
62b9238976Sth #include <sys/systeminfo.h>
63b9238976Sth #include <sys/policy.h>
64b9238976Sth #include <sys/sdt.h>
65b9238976Sth #include <sys/list.h>
66b9238976Sth #include <sys/stat.h>
67b9238976Sth #include <sys/mntent.h>
682f172c55SRobert Thurlow #include <sys/priv.h>
69b9238976Sth 
70b9238976Sth #include <rpc/types.h>
71b9238976Sth #include <rpc/auth.h>
72b9238976Sth #include <rpc/clnt.h>
73b9238976Sth 
74b9238976Sth #include <nfs/nfs.h>
75b9238976Sth #include <nfs/nfs_clnt.h>
76b9238976Sth #include <nfs/nfs_acl.h>
77b9238976Sth #include <nfs/lm.h>
78b9238976Sth #include <nfs/nfs4.h>
79b9238976Sth #include <nfs/nfs4_kprot.h>
80b9238976Sth #include <nfs/rnode4.h>
81b9238976Sth #include <nfs/nfs4_clnt.h>
822f172c55SRobert Thurlow #include <nfs/nfsid_map.h>
832f172c55SRobert Thurlow #include <nfs/nfs4_idmap_impl.h>
84b9238976Sth 
85b9238976Sth #include <vm/hat.h>
86b9238976Sth #include <vm/as.h>
87b9238976Sth #include <vm/page.h>
88b9238976Sth #include <vm/pvn.h>
89b9238976Sth #include <vm/seg.h>
90b9238976Sth #include <vm/seg_map.h>
91b9238976Sth #include <vm/seg_kpm.h>
92b9238976Sth #include <vm/seg_vn.h>
93b9238976Sth 
94b9238976Sth #include <fs/fs_subr.h>
95b9238976Sth 
96b9238976Sth #include <sys/ddi.h>
97b9238976Sth #include <sys/int_fmtio.h>
98b9238976Sth 
99f39b8789Sth #include <sys/sunddi.h>
100b9238976Sth 
101546a3997SThomas Haynes #include <sys/priv_names.h>
102546a3997SThomas Haynes 
1032f172c55SRobert Thurlow extern zone_key_t	nfs4clnt_zone_key;
1042f172c55SRobert Thurlow extern zone_key_t	nfsidmap_zone_key;
1052f172c55SRobert Thurlow 
/*
 * The automatic unmounter thread stuff!
 */
static int nfs4_trigger_thread_timer = 20;	/* in seconds */

/*
 * Just a default....
 */
static uint_t nfs4_trigger_mount_to = 240;

/*
 * Per-zone state for the ephemeral mount subsystem; attached to the
 * zone via nfs4_ephemeral_key below.
 */
typedef struct nfs4_trigger_globals {
	kmutex_t		ntg_forest_lock;	/* lock for ntg_forest */
	uint_t			ntg_mount_to;	/* see nfs4_trigger_mount_to */
	int			ntg_thread_started; /* harvester running? */
	nfs4_ephemeral_tree_t	*ntg_forest;	/* list of ephemeral trees */
} nfs4_trigger_globals_t;

kmutex_t	nfs4_ephemeral_thread_lock;

zone_key_t	nfs4_ephemeral_key = ZONE_KEY_UNINITIALIZED;

static void	nfs4_ephemeral_start_harvester(nfs4_trigger_globals_t *);
128b9238976Sth 
129b9238976Sth /*
130b9238976Sth  * Used for ephemeral mounts; contains data either duplicated from
131b9238976Sth  * servinfo4_t, or hand-crafted, depending on type of ephemeral mount.
132b9238976Sth  *
133b9238976Sth  * It's intended that this structure is used solely for ephemeral
134b9238976Sth  * mount-type specific data, for passing this data to
135b9238976Sth  * nfs4_trigger_nargs_create().
136b9238976Sth  */
typedef struct ephemeral_servinfo {
	char			*esi_hostname;	/* server hostname */
	char			*esi_netname;	/* server netname */
	char			*esi_path;	/* server-side path to mount */
	int			esi_path_len;	/* length of esi_path */
	int			esi_mount_flags; /* mount flags for the new fs */
	struct netbuf		*esi_addr;	/* server transport address */
	struct netbuf		*esi_syncaddr;	/* sync address, if any */
	struct knetconfig	*esi_knconf;	/* transport configuration */
} ephemeral_servinfo_t;
147b9238976Sth 
148b9238976Sth /*
149b9238976Sth  * Collect together the mount-type specific and generic data args.
150b9238976Sth  */
typedef struct domount_args {
	ephemeral_servinfo_t	*dma_esi;	/* mount-type specific data */
	char			*dma_hostlist; /* comma-sep. for RO failover */
	struct nfs_args		*dma_nargs;	/* generic NFS mount args */
} domount_args_t;
156b9238976Sth 
157b9238976Sth 
158b9238976Sth /*
159b9238976Sth  * The vnode ops functions for a trigger stub vnode
160b9238976Sth  */
161da6c28aaSamw static int nfs4_trigger_open(vnode_t **, int, cred_t *, caller_context_t *);
162da6c28aaSamw static int nfs4_trigger_getattr(vnode_t *, struct vattr *, int, cred_t *,
163da6c28aaSamw     caller_context_t *);
164da6c28aaSamw static int nfs4_trigger_setattr(vnode_t *, struct vattr *, int, cred_t *,
165da6c28aaSamw     caller_context_t *);
166da6c28aaSamw static int nfs4_trigger_access(vnode_t *, int, int, cred_t *,
167da6c28aaSamw     caller_context_t *);
168da6c28aaSamw static int nfs4_trigger_readlink(vnode_t *, struct uio *, cred_t *,
169da6c28aaSamw     caller_context_t *);
170da6c28aaSamw static int nfs4_trigger_lookup(vnode_t *, char *, vnode_t **,
171da6c28aaSamw     struct pathname *, int, vnode_t *, cred_t *, caller_context_t *,
172da6c28aaSamw     int *, pathname_t *);
173da6c28aaSamw static int nfs4_trigger_create(vnode_t *, char *, struct vattr *,
174da6c28aaSamw     enum vcexcl, int, vnode_t **, cred_t *, int, caller_context_t *,
175da6c28aaSamw     vsecattr_t *);
176da6c28aaSamw static int nfs4_trigger_remove(vnode_t *, char *, cred_t *, caller_context_t *,
177da6c28aaSamw     int);
178da6c28aaSamw static int nfs4_trigger_link(vnode_t *, vnode_t *, char *, cred_t *,
179da6c28aaSamw     caller_context_t *, int);
180da6c28aaSamw static int nfs4_trigger_rename(vnode_t *, char *, vnode_t *, char *,
181da6c28aaSamw     cred_t *, caller_context_t *, int);
182da6c28aaSamw static int nfs4_trigger_mkdir(vnode_t *, char *, struct vattr *,
183da6c28aaSamw     vnode_t **, cred_t *, caller_context_t *, int, vsecattr_t *vsecp);
184da6c28aaSamw static int nfs4_trigger_rmdir(vnode_t *, char *, vnode_t *, cred_t *,
185da6c28aaSamw     caller_context_t *, int);
186da6c28aaSamw static int nfs4_trigger_symlink(vnode_t *, char *, struct vattr *, char *,
187da6c28aaSamw     cred_t *, caller_context_t *, int);
188da6c28aaSamw static int nfs4_trigger_cmp(vnode_t *, vnode_t *, caller_context_t *);
189b9238976Sth 
190b9238976Sth /*
191b9238976Sth  * Regular NFSv4 vnodeops that we need to reference directly
192b9238976Sth  */
193da6c28aaSamw extern int	nfs4_getattr(vnode_t *, struct vattr *, int, cred_t *,
194da6c28aaSamw 		    caller_context_t *);
195da6c28aaSamw extern void	nfs4_inactive(vnode_t *, cred_t *, caller_context_t *);
196b9238976Sth extern int	nfs4_rwlock(vnode_t *, int, caller_context_t *);
197b9238976Sth extern void	nfs4_rwunlock(vnode_t *, int, caller_context_t *);
198b9238976Sth extern int	nfs4_lookup(vnode_t *, char *, vnode_t **,
199da6c28aaSamw 		    struct pathname *, int, vnode_t *, cred_t *,
200da6c28aaSamw 		    caller_context_t *, int *, pathname_t *);
201da6c28aaSamw extern int	nfs4_pathconf(vnode_t *, int, ulong_t *, cred_t *,
202da6c28aaSamw 		    caller_context_t *);
203da6c28aaSamw extern int	nfs4_getsecattr(vnode_t *, vsecattr_t *, int, cred_t *,
204da6c28aaSamw 		    caller_context_t *);
205da6c28aaSamw extern int	nfs4_fid(vnode_t *, fid_t *, caller_context_t *);
206da6c28aaSamw extern int	nfs4_realvp(vnode_t *, vnode_t **, caller_context_t *);
207b9238976Sth 
208546a3997SThomas Haynes static int	nfs4_trigger_mount(vnode_t *, cred_t *, vnode_t **);
209b9238976Sth static int	nfs4_trigger_domount(vnode_t *, domount_args_t *, vfs_t **,
2106962f5b8SThomas Haynes     cred_t *, vnode_t **);
211d16da320SSimon Klinkert static int 	nfs4_trigger_domount_args_create(vnode_t *, cred_t *,
212d16da320SSimon Klinkert     domount_args_t **dmap);
213b9238976Sth static void	nfs4_trigger_domount_args_destroy(domount_args_t *dma,
214b9238976Sth     vnode_t *vp);
2152f172c55SRobert Thurlow static ephemeral_servinfo_t *nfs4_trigger_esi_create(vnode_t *, servinfo4_t *,
2162f172c55SRobert Thurlow     cred_t *);
217b9238976Sth static void	nfs4_trigger_esi_destroy(ephemeral_servinfo_t *, vnode_t *);
218b9238976Sth static ephemeral_servinfo_t *nfs4_trigger_esi_create_mirrormount(vnode_t *,
219b9238976Sth     servinfo4_t *);
2202f172c55SRobert Thurlow static ephemeral_servinfo_t *nfs4_trigger_esi_create_referral(vnode_t *,
2212f172c55SRobert Thurlow     cred_t *);
222b9238976Sth static struct nfs_args 	*nfs4_trigger_nargs_create(mntinfo4_t *, servinfo4_t *,
223b9238976Sth     ephemeral_servinfo_t *);
224b9238976Sth static void	nfs4_trigger_nargs_destroy(struct nfs_args *);
225b9238976Sth static char	*nfs4_trigger_create_mntopts(vfs_t *);
226b9238976Sth static void	nfs4_trigger_destroy_mntopts(char *);
227b9238976Sth static int 	nfs4_trigger_add_mntopt(char *, char *, vfs_t *);
228b9238976Sth static enum clnt_stat nfs4_trigger_ping_server(servinfo4_t *, int);
2292f172c55SRobert Thurlow static enum clnt_stat nfs4_ping_server_common(struct knetconfig *,
2302f172c55SRobert Thurlow     struct netbuf *, int);
231b9238976Sth 
232b9238976Sth extern int	umount2_engine(vfs_t *, int, cred_t *, int);
233b9238976Sth 
234b9238976Sth vnodeops_t *nfs4_trigger_vnodeops;
235b9238976Sth 
236b9238976Sth /*
237b9238976Sth  * These are the vnodeops that we must define for stub vnodes.
238b9238976Sth  *
239b9238976Sth  *
240b9238976Sth  * Many of the VOPs defined for NFSv4 do not need to be defined here,
241b9238976Sth  * for various reasons. This will result in the VFS default function being
242b9238976Sth  * used:
243b9238976Sth  *
244b9238976Sth  * - These VOPs require a previous VOP_OPEN to have occurred. That will have
245b9238976Sth  *   lost the reference to the stub vnode, meaning these should not be called:
246b9238976Sth  *       close, read, write, ioctl, readdir, seek.
247b9238976Sth  *
248b9238976Sth  * - These VOPs are meaningless for vnodes without data pages. Since the
249b9238976Sth  *   stub vnode is of type VDIR, these should not be called:
250b9238976Sth  *       space, getpage, putpage, map, addmap, delmap, pageio, fsync.
251b9238976Sth  *
252b9238976Sth  * - These VOPs are otherwise not applicable, and should not be called:
253b9238976Sth  *       dump, setsecattr.
254b9238976Sth  *
255b9238976Sth  *
256b9238976Sth  * These VOPs we do not want to define, but nor do we want the VFS default
257b9238976Sth  * action. Instead, we specify the VFS error function, with fs_error(), but
258b9238976Sth  * note that fs_error() is not actually called. Instead it results in the
259b9238976Sth  * use of the error function defined for the particular VOP, in vn_ops_table[]:
260b9238976Sth  *
261b9238976Sth  * -   frlock, dispose, shrlock.
262b9238976Sth  *
263b9238976Sth  *
264b9238976Sth  * These VOPs we define to use the corresponding regular NFSv4 vnodeop.
265b9238976Sth  * NOTE: if any of these ops involve an OTW call with the stub FH, then
266b9238976Sth  * that call must be wrapped with save_mnt_secinfo()/check_mnt_secinfo()
267b9238976Sth  * to protect the security data in the servinfo4_t for the "parent"
268b9238976Sth  * filesystem that contains the stub.
269b9238976Sth  *
270b9238976Sth  * - These VOPs should not trigger a mount, so that "ls -l" does not:
271b9238976Sth  *       pathconf, getsecattr.
272b9238976Sth  *
273b9238976Sth  * - These VOPs would not make sense to trigger:
274b9238976Sth  *       inactive, rwlock, rwunlock, fid, realvp.
275b9238976Sth  */
/* Trigger vnodeop table for stub vnodes; see the block comment above. */
const fs_operation_def_t nfs4_trigger_vnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = nfs4_trigger_open },
	VOPNAME_GETATTR,	{ .vop_getattr = nfs4_trigger_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = nfs4_trigger_setattr },
	VOPNAME_ACCESS,		{ .vop_access = nfs4_trigger_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = nfs4_trigger_lookup },
	VOPNAME_CREATE,		{ .vop_create = nfs4_trigger_create },
	VOPNAME_REMOVE,		{ .vop_remove = nfs4_trigger_remove },
	VOPNAME_LINK,		{ .vop_link = nfs4_trigger_link },
	VOPNAME_RENAME,		{ .vop_rename = nfs4_trigger_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = nfs4_trigger_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = nfs4_trigger_rmdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = nfs4_trigger_symlink },
	VOPNAME_READLINK,	{ .vop_readlink = nfs4_trigger_readlink },
	VOPNAME_INACTIVE, 	{ .vop_inactive = nfs4_inactive },
	VOPNAME_FID,		{ .vop_fid = nfs4_fid },
	VOPNAME_RWLOCK,		{ .vop_rwlock = nfs4_rwlock },
	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = nfs4_rwunlock },
	VOPNAME_REALVP,		{ .vop_realvp = nfs4_realvp },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = nfs4_getsecattr },
	VOPNAME_PATHCONF,	{ .vop_pathconf = nfs4_pathconf },
	VOPNAME_FRLOCK,		{ .error = fs_error },
	VOPNAME_DISPOSE,	{ .error = fs_error },
	VOPNAME_SHRLOCK,	{ .error = fs_error },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL, NULL
};
303b9238976Sth 
/*
 * Bump the ephemeral tree's reference count.  The caller must already
 * hold net_cnt_lock; use nfs4_ephemeral_tree_hold() otherwise.
 */
static void
nfs4_ephemeral_tree_incr(nfs4_ephemeral_tree_t *net)
{
	ASSERT(mutex_owned(&net->net_cnt_lock));
	net->net_refcnt++;
	ASSERT(net->net_refcnt != 0);	/* catch refcnt wrap-around */
}
311d708af74SThomas Haynes 
/*
 * Take a hold on the ephemeral tree, acquiring net_cnt_lock
 * around the increment.
 */
static void
nfs4_ephemeral_tree_hold(nfs4_ephemeral_tree_t *net)
{
	mutex_enter(&net->net_cnt_lock);
	nfs4_ephemeral_tree_incr(net);
	mutex_exit(&net->net_cnt_lock);
}
319d3a14591SThomas Haynes 
/*
 * We need a safe way to decrement the refcnt whilst the
 * lock is being held.  The caller must already hold net_cnt_lock.
 */
static void
nfs4_ephemeral_tree_decr(nfs4_ephemeral_tree_t *net)
{
	ASSERT(mutex_owned(&net->net_cnt_lock));
	ASSERT(net->net_refcnt != 0);	/* must not underflow */
	net->net_refcnt--;
}
331d3a14591SThomas Haynes 
/*
 * Release a hold on the ephemeral tree, acquiring net_cnt_lock
 * around the decrement.
 */
static void
nfs4_ephemeral_tree_rele(nfs4_ephemeral_tree_t *net)
{
	mutex_enter(&net->net_cnt_lock);
	nfs4_ephemeral_tree_decr(net);
	mutex_exit(&net->net_cnt_lock);
}
339d3a14591SThomas Haynes 
340b9238976Sth /*
341b9238976Sth  * Trigger ops for stub vnodes; for mirror mounts, etc.
342b9238976Sth  *
343b9238976Sth  * The general idea is that a "triggering" op will first call
344b9238976Sth  * nfs4_trigger_mount(), which will find out whether a mount has already
345b9238976Sth  * been triggered.
346b9238976Sth  *
347b9238976Sth  * If it has, then nfs4_trigger_mount() sets newvp to the root vnode
348b9238976Sth  * of the covering vfs.
349b9238976Sth  *
350b9238976Sth  * If a mount has not yet been triggered, nfs4_trigger_mount() will do so,
351b9238976Sth  * and again set newvp, as above.
352b9238976Sth  *
353b9238976Sth  * The triggering op may then re-issue the VOP by calling it on newvp.
354b9238976Sth  *
355b9238976Sth  * Note that some ops may perform custom action, and may or may not need
356b9238976Sth  * to trigger a mount.
357b9238976Sth  *
358b9238976Sth  * Some ops need to call the regular NFSv4 vnodeop for a stub vnode. We
359b9238976Sth  * obviously can't do this with VOP_<whatever>, since it's a stub vnode
360b9238976Sth  * and that would just recurse. Instead, we call the v4 op directly,
361b9238976Sth  * by name.  This is OK, since we know that the vnode is for NFSv4,
362b9238976Sth  * otherwise it couldn't be a stub.
363b9238976Sth  *
364b9238976Sth  */
365b9238976Sth 
366b9238976Sth static int
367da6c28aaSamw nfs4_trigger_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
368b9238976Sth {
369b9238976Sth 	int error;
370b9238976Sth 	vnode_t *newvp;
371b9238976Sth 
372546a3997SThomas Haynes 	error = nfs4_trigger_mount(*vpp, cr, &newvp);
373b9238976Sth 	if (error)
374b9238976Sth 		return (error);
375b9238976Sth 
376b9238976Sth 	/* Release the stub vnode, as we're losing the reference to it */
377b9238976Sth 	VN_RELE(*vpp);
378b9238976Sth 
379b9238976Sth 	/* Give the caller the root vnode of the newly-mounted fs */
380b9238976Sth 	*vpp = newvp;
381b9238976Sth 
382b9238976Sth 	/* return with VN_HELD(newvp) */
383da6c28aaSamw 	return (VOP_OPEN(vpp, flag, cr, ct));
384b9238976Sth }
385b9238976Sth 
3862f172c55SRobert Thurlow void
3872f172c55SRobert Thurlow nfs4_fake_attrs(vnode_t *vp, struct vattr *vap)
3882f172c55SRobert Thurlow {
3892f172c55SRobert Thurlow 	uint_t mask;
3902f172c55SRobert Thurlow 	timespec_t now;
3912f172c55SRobert Thurlow 
3922f172c55SRobert Thurlow 	/*
3932f172c55SRobert Thurlow 	 * Set some attributes here for referrals.
3942f172c55SRobert Thurlow 	 */
3952f172c55SRobert Thurlow 	mask = vap->va_mask;
3962f172c55SRobert Thurlow 	bzero(vap, sizeof (struct vattr));
3972f172c55SRobert Thurlow 	vap->va_mask	= mask;
3982f172c55SRobert Thurlow 	vap->va_uid	= 0;
3992f172c55SRobert Thurlow 	vap->va_gid	= 0;
4002f172c55SRobert Thurlow 	vap->va_nlink	= 1;
4012f172c55SRobert Thurlow 	vap->va_size	= 1;
4022f172c55SRobert Thurlow 	gethrestime(&now);
4032f172c55SRobert Thurlow 	vap->va_atime	= now;
4042f172c55SRobert Thurlow 	vap->va_mtime	= now;
4052f172c55SRobert Thurlow 	vap->va_ctime	= now;
4062f172c55SRobert Thurlow 	vap->va_type	= VDIR;
4072f172c55SRobert Thurlow 	vap->va_mode	= 0555;
4082f172c55SRobert Thurlow 	vap->va_fsid	= vp->v_vfsp->vfs_dev;
4092f172c55SRobert Thurlow 	vap->va_rdev	= 0;
4102f172c55SRobert Thurlow 	vap->va_blksize	= MAXBSIZE;
4112f172c55SRobert Thurlow 	vap->va_nblocks	= 1;
4122f172c55SRobert Thurlow 	vap->va_seq	= 0;
4132f172c55SRobert Thurlow }
4142f172c55SRobert Thurlow 
415b9238976Sth /*
416b9238976Sth  * For the majority of cases, nfs4_trigger_getattr() will not trigger
417b9238976Sth  * a mount. However, if ATTR_TRIGGER is set, we are being informed
418b9238976Sth  * that we need to force the mount before we attempt to determine
419b9238976Sth  * the attributes. The intent is an atomic operation for security
420b9238976Sth  * testing.
4212f172c55SRobert Thurlow  *
4222f172c55SRobert Thurlow  * If we're not triggering a mount, we can still inquire about the
4232f172c55SRobert Thurlow  * actual attributes from the server in the mirror mount case,
4242f172c55SRobert Thurlow  * and will return manufactured attributes for a referral (see
4252f172c55SRobert Thurlow  * the 'create' branch of find_referral_stubvp()).
426b9238976Sth  */
427b9238976Sth static int
428da6c28aaSamw nfs4_trigger_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
429da6c28aaSamw     caller_context_t *ct)
430b9238976Sth {
431b9238976Sth 	int error;
432b9238976Sth 
433b9238976Sth 	if (flags & ATTR_TRIGGER) {
434b9238976Sth 		vnode_t	*newvp;
435b9238976Sth 
436546a3997SThomas Haynes 		error = nfs4_trigger_mount(vp, cr, &newvp);
437b9238976Sth 		if (error)
438b9238976Sth 			return (error);
439b9238976Sth 
440da6c28aaSamw 		error = VOP_GETATTR(newvp, vap, flags, cr, ct);
441b9238976Sth 		VN_RELE(newvp);
4422f172c55SRobert Thurlow 
4432f172c55SRobert Thurlow 	} else if (RP_ISSTUB_MIRRORMOUNT(VTOR4(vp))) {
4442f172c55SRobert Thurlow 
445da6c28aaSamw 		error = nfs4_getattr(vp, vap, flags, cr, ct);
4462f172c55SRobert Thurlow 
4472f172c55SRobert Thurlow 	} else if (RP_ISSTUB_REFERRAL(VTOR4(vp))) {
4482f172c55SRobert Thurlow 
4492f172c55SRobert Thurlow 		nfs4_fake_attrs(vp, vap);
4502f172c55SRobert Thurlow 		error = 0;
451b9238976Sth 	}
452b9238976Sth 
453b9238976Sth 	return (error);
454b9238976Sth }
455b9238976Sth 
456b9238976Sth static int
457b9238976Sth nfs4_trigger_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
458*a17ce845SMarcel Telka     caller_context_t *ct)
459b9238976Sth {
460b9238976Sth 	int error;
461b9238976Sth 	vnode_t *newvp;
462b9238976Sth 
463546a3997SThomas Haynes 	error = nfs4_trigger_mount(vp, cr, &newvp);
464b9238976Sth 	if (error)
465b9238976Sth 		return (error);
466b9238976Sth 
467b9238976Sth 	error = VOP_SETATTR(newvp, vap, flags, cr, ct);
468b9238976Sth 	VN_RELE(newvp);
469b9238976Sth 
470b9238976Sth 	return (error);
471b9238976Sth }
472b9238976Sth 
473b9238976Sth static int
474da6c28aaSamw nfs4_trigger_access(vnode_t *vp, int mode, int flags, cred_t *cr,
475da6c28aaSamw     caller_context_t *ct)
476b9238976Sth {
477b9238976Sth 	int error;
478b9238976Sth 	vnode_t *newvp;
479b9238976Sth 
480546a3997SThomas Haynes 	error = nfs4_trigger_mount(vp, cr, &newvp);
481b9238976Sth 	if (error)
482b9238976Sth 		return (error);
483b9238976Sth 
484da6c28aaSamw 	error = VOP_ACCESS(newvp, mode, flags, cr, ct);
485b9238976Sth 	VN_RELE(newvp);
486b9238976Sth 
487b9238976Sth 	return (error);
488b9238976Sth }
489b9238976Sth 
490b9238976Sth static int
491da6c28aaSamw nfs4_trigger_lookup(vnode_t *dvp, char *nm, vnode_t **vpp,
492da6c28aaSamw     struct pathname *pnp, int flags, vnode_t *rdir, cred_t *cr,
493da6c28aaSamw     caller_context_t *ct, int *deflags, pathname_t *rpnp)
494b9238976Sth {
495b9238976Sth 	int error;
496b9238976Sth 	vnode_t *newdvp;
497b9238976Sth 	rnode4_t *drp = VTOR4(dvp);
498b9238976Sth 
499b9238976Sth 	ASSERT(RP_ISSTUB(drp));
500b9238976Sth 
501b9238976Sth 	/*
502b9238976Sth 	 * It's not legal to lookup ".." for an fs root, so we mustn't pass
503b9238976Sth 	 * that up. Instead, pass onto the regular op, regardless of whether
504b9238976Sth 	 * we've triggered a mount.
505b9238976Sth 	 */
506b9238976Sth 	if (strcmp(nm, "..") == 0)
5072f172c55SRobert Thurlow 		if (RP_ISSTUB_MIRRORMOUNT(drp)) {
5082f172c55SRobert Thurlow 			return (nfs4_lookup(dvp, nm, vpp, pnp, flags, rdir, cr,
5092f172c55SRobert Thurlow 			    ct, deflags, rpnp));
5102f172c55SRobert Thurlow 		} else if (RP_ISSTUB_REFERRAL(drp)) {
5112f172c55SRobert Thurlow 			/* Return the parent vnode */
5122f172c55SRobert Thurlow 			return (vtodv(dvp, vpp, cr, TRUE));
5132f172c55SRobert Thurlow 		}
514b9238976Sth 
515546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
516b9238976Sth 	if (error)
517b9238976Sth 		return (error);
518b9238976Sth 
519da6c28aaSamw 	error = VOP_LOOKUP(newdvp, nm, vpp, pnp, flags, rdir, cr, ct,
520da6c28aaSamw 	    deflags, rpnp);
521b9238976Sth 	VN_RELE(newdvp);
522b9238976Sth 
523b9238976Sth 	return (error);
524b9238976Sth }
525b9238976Sth 
526b9238976Sth static int
527b9238976Sth nfs4_trigger_create(vnode_t *dvp, char *nm, struct vattr *va,
528da6c28aaSamw     enum vcexcl exclusive, int mode, vnode_t **vpp, cred_t *cr,
529da6c28aaSamw     int flags, caller_context_t *ct, vsecattr_t *vsecp)
530b9238976Sth {
531b9238976Sth 	int error;
532b9238976Sth 	vnode_t *newdvp;
533b9238976Sth 
534546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
535b9238976Sth 	if (error)
536b9238976Sth 		return (error);
537b9238976Sth 
538da6c28aaSamw 	error = VOP_CREATE(newdvp, nm, va, exclusive, mode, vpp, cr,
539da6c28aaSamw 	    flags, ct, vsecp);
540b9238976Sth 	VN_RELE(newdvp);
541b9238976Sth 
542b9238976Sth 	return (error);
543b9238976Sth }
544b9238976Sth 
545b9238976Sth static int
546da6c28aaSamw nfs4_trigger_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
547da6c28aaSamw     int flags)
548b9238976Sth {
549b9238976Sth 	int error;
550b9238976Sth 	vnode_t *newdvp;
551b9238976Sth 
552546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
553b9238976Sth 	if (error)
554b9238976Sth 		return (error);
555b9238976Sth 
556da6c28aaSamw 	error = VOP_REMOVE(newdvp, nm, cr, ct, flags);
557b9238976Sth 	VN_RELE(newdvp);
558b9238976Sth 
559b9238976Sth 	return (error);
560b9238976Sth }
561b9238976Sth 
562b9238976Sth static int
563da6c28aaSamw nfs4_trigger_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
564da6c28aaSamw     caller_context_t *ct, int flags)
565b9238976Sth {
566b9238976Sth 	int error;
567b9238976Sth 	vnode_t *newtdvp;
568b9238976Sth 
569546a3997SThomas Haynes 	error = nfs4_trigger_mount(tdvp, cr, &newtdvp);
570b9238976Sth 	if (error)
571b9238976Sth 		return (error);
572b9238976Sth 
573b9238976Sth 	/*
574b9238976Sth 	 * We don't check whether svp is a stub. Let the NFSv4 code
575b9238976Sth 	 * detect that error, and return accordingly.
576b9238976Sth 	 */
577da6c28aaSamw 	error = VOP_LINK(newtdvp, svp, tnm, cr, ct, flags);
578b9238976Sth 	VN_RELE(newtdvp);
579b9238976Sth 
580b9238976Sth 	return (error);
581b9238976Sth }
582b9238976Sth 
583b9238976Sth static int
584b9238976Sth nfs4_trigger_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
585da6c28aaSamw     cred_t *cr, caller_context_t *ct, int flags)
586b9238976Sth {
587b9238976Sth 	int error;
588b9238976Sth 	vnode_t *newsdvp;
589b9238976Sth 	rnode4_t *tdrp = VTOR4(tdvp);
590b9238976Sth 
591b9238976Sth 	/*
592b9238976Sth 	 * We know that sdvp is a stub, otherwise we would not be here.
593b9238976Sth 	 *
594b9238976Sth 	 * If tdvp is also be a stub, there are two possibilities: it
595b9238976Sth 	 * is either the same stub as sdvp [i.e. VN_CMP(sdvp, tdvp)]
596b9238976Sth 	 * or it is a different stub [!VN_CMP(sdvp, tdvp)].
597b9238976Sth 	 *
598b9238976Sth 	 * In the former case, just trigger sdvp, and treat tdvp as
599b9238976Sth 	 * though it were not a stub.
600b9238976Sth 	 *
601b9238976Sth 	 * In the latter case, it might be a different stub for the
602b9238976Sth 	 * same server fs as sdvp, or for a different server fs.
603b9238976Sth 	 * Regardless, from the client perspective this would still
604b9238976Sth 	 * be a cross-filesystem rename, and should not be allowed,
605b9238976Sth 	 * so return EXDEV, without triggering either mount.
606b9238976Sth 	 */
607b9238976Sth 	if (RP_ISSTUB(tdrp) && !VN_CMP(sdvp, tdvp))
608b9238976Sth 		return (EXDEV);
609b9238976Sth 
610546a3997SThomas Haynes 	error = nfs4_trigger_mount(sdvp, cr, &newsdvp);
611b9238976Sth 	if (error)
612b9238976Sth 		return (error);
613b9238976Sth 
614da6c28aaSamw 	error = VOP_RENAME(newsdvp, snm, tdvp, tnm, cr, ct, flags);
615b9238976Sth 
616b9238976Sth 	VN_RELE(newsdvp);
617b9238976Sth 
618b9238976Sth 	return (error);
619b9238976Sth }
620b9238976Sth 
621da6c28aaSamw /* ARGSUSED */
622b9238976Sth static int
623b9238976Sth nfs4_trigger_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
624da6c28aaSamw     cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
625b9238976Sth {
626b9238976Sth 	int error;
627b9238976Sth 	vnode_t *newdvp;
628b9238976Sth 
629546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
630b9238976Sth 	if (error)
631b9238976Sth 		return (error);
632b9238976Sth 
633da6c28aaSamw 	error = VOP_MKDIR(newdvp, nm, va, vpp, cr, ct, flags, vsecp);
634b9238976Sth 	VN_RELE(newdvp);
635b9238976Sth 
636b9238976Sth 	return (error);
637b9238976Sth }
638b9238976Sth 
639b9238976Sth static int
640da6c28aaSamw nfs4_trigger_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
641da6c28aaSamw     caller_context_t *ct, int flags)
642b9238976Sth {
643b9238976Sth 	int error;
644b9238976Sth 	vnode_t *newdvp;
645b9238976Sth 
646546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
647b9238976Sth 	if (error)
648b9238976Sth 		return (error);
649b9238976Sth 
650da6c28aaSamw 	error = VOP_RMDIR(newdvp, nm, cdir, cr, ct, flags);
651b9238976Sth 	VN_RELE(newdvp);
652b9238976Sth 
653b9238976Sth 	return (error);
654b9238976Sth }
655b9238976Sth 
656b9238976Sth static int
657b9238976Sth nfs4_trigger_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm,
658da6c28aaSamw     cred_t *cr, caller_context_t *ct, int flags)
659b9238976Sth {
660b9238976Sth 	int error;
661b9238976Sth 	vnode_t *newdvp;
662b9238976Sth 
663546a3997SThomas Haynes 	error = nfs4_trigger_mount(dvp, cr, &newdvp);
664b9238976Sth 	if (error)
665b9238976Sth 		return (error);
666b9238976Sth 
667da6c28aaSamw 	error = VOP_SYMLINK(newdvp, lnm, tva, tnm, cr, ct, flags);
668b9238976Sth 	VN_RELE(newdvp);
669b9238976Sth 
670b9238976Sth 	return (error);
671b9238976Sth }
672b9238976Sth 
673b9238976Sth static int
674da6c28aaSamw nfs4_trigger_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr,
675da6c28aaSamw     caller_context_t *ct)
676b9238976Sth {
677b9238976Sth 	int error;
678b9238976Sth 	vnode_t *newvp;
679b9238976Sth 
680546a3997SThomas Haynes 	error = nfs4_trigger_mount(vp, cr, &newvp);
681b9238976Sth 	if (error)
682b9238976Sth 		return (error);
683b9238976Sth 
684da6c28aaSamw 	error = VOP_READLINK(newvp, uiop, cr, ct);
685b9238976Sth 	VN_RELE(newvp);
686b9238976Sth 
687b9238976Sth 	return (error);
688b9238976Sth }
689b9238976Sth 
690b9238976Sth /* end of trigger vnode ops */
691b9238976Sth 
/*
 * See if the mount has already been done by another caller.
 *
 * On return, *vfsp is the covering vfs (or NULL if vp is not covered).
 * If a mount was found and its root obtained, *newvpp holds the held
 * root vnode and *was_mounted is TRUE.
 *
 * Note that a VFS_ROOT() failure is deliberately NOT propagated: we
 * still return 0 with *was_mounted FALSE, so the caller will simply
 * attempt the mount itself.
 */
static int
nfs4_trigger_mounted_already(vnode_t *vp, vnode_t **newvpp,
    bool_t *was_mounted, vfs_t **vfsp)
{
	int		error;
	mntinfo4_t	*mi = VTOMI4(vp);

	*was_mounted = FALSE;

	/* Serialize against a concurrent mount/unmount of vp. */
	error = vn_vfsrlock_wait(vp);
	if (error)
		return (error);

	*vfsp = vn_mountedvfs(vp);
	if (*vfsp != NULL) {
		/* the mount has already occurred */
		error = VFS_ROOT(*vfsp, newvpp);
		if (!error) {
			/* need to update the reference time  */
			mutex_enter(&mi->mi_lock);
			if (mi->mi_ephemeral)
				mi->mi_ephemeral->ne_ref_time =
				    gethrestime_sec();
			mutex_exit(&mi->mi_lock);

			*was_mounted = TRUE;
		}
	}

	vn_vfsunlock(vp);
	return (0);
}
7276962f5b8SThomas Haynes 
728b9238976Sth /*
7292f172c55SRobert Thurlow  * Mount upon a trigger vnode; for mirror-mounts, referrals, etc.
730b9238976Sth  *
731b9238976Sth  * The mount may have already occurred, via another thread. If not,
732b9238976Sth  * assemble the location information - which may require fetching - and
733b9238976Sth  * perform the mount.
734b9238976Sth  *
735b9238976Sth  * Sets newvp to be the root of the fs that is now covering vp. Note
736b9238976Sth  * that we return with VN_HELD(*newvp).
737b9238976Sth  *
738b9238976Sth  * The caller is responsible for passing the VOP onto the covering fs.
739b9238976Sth  */
static int
nfs4_trigger_mount(vnode_t *vp, cred_t *cr, vnode_t **newvpp)
{
	int			 error;
	vfs_t			*vfsp;
	rnode4_t		*rp = VTOR4(vp);
	mntinfo4_t		*mi = VTOMI4(vp);
	domount_args_t		*dma;

	nfs4_ephemeral_tree_t	*net;

	bool_t			must_unlock = FALSE;
	bool_t			is_building = FALSE;
	bool_t			was_mounted = FALSE;

	cred_t			*mcred = NULL;

	nfs4_trigger_globals_t	*ntg;

	zone_t			*zone = curproc->p_zone;

	/* only ever called on a stub rnode (trigger vnode) */
	ASSERT(RP_ISSTUB(rp));

	*newvpp = NULL;

	/*
	 * Has the mount already occurred?
	 */
	error = nfs4_trigger_mounted_already(vp, newvpp,
	    &was_mounted, &vfsp);
	if (error || was_mounted)
		goto done;

	/* per-zone state for ephemeral mount harvesting */
	ntg = zone_getspecific(nfs4_ephemeral_key, zone);
	ASSERT(ntg != NULL);

	mutex_enter(&mi->mi_lock);

	/*
	 * We need to lock down the ephemeral tree.
	 */
	if (mi->mi_ephemeral_tree == NULL) {
		/*
		 * First ephemeral mount under this fs: allocate the
		 * tree and mark it BUILDING so other threads back off.
		 */
		net = kmem_zalloc(sizeof (*net), KM_SLEEP);
		mutex_init(&net->net_tree_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&net->net_cnt_lock, NULL, MUTEX_DEFAULT, NULL);
		net->net_refcnt = 1;
		net->net_status = NFS4_EPHEMERAL_TREE_BUILDING;
		is_building = TRUE;

		/*
		 * We need to add it to the zone specific list for
		 * automatic unmounting and harvesting of deadwood.
		 */
		mutex_enter(&ntg->ntg_forest_lock);
		if (ntg->ntg_forest != NULL)
			net->net_next = ntg->ntg_forest;
		ntg->ntg_forest = net;
		mutex_exit(&ntg->ntg_forest_lock);

		/*
		 * No lock order confusion with mi_lock because no
		 * other node could have grabbed net_tree_lock.
		 */
		mutex_enter(&net->net_tree_lock);
		mi->mi_ephemeral_tree = net;
		net->net_mount = mi;
		mutex_exit(&mi->mi_lock);

		/* tree now references the mntinfo and its vfs */
		MI4_HOLD(mi);
		VFS_HOLD(mi->mi_vfsp);
	} else {
		net = mi->mi_ephemeral_tree;
		nfs4_ephemeral_tree_hold(net);

		/* drop mi_lock before net_tree_lock: lock ordering */
		mutex_exit(&mi->mi_lock);

		mutex_enter(&net->net_tree_lock);

		/*
		 * We can only proceed if the tree is neither locked
		 * nor being torn down.
		 */
		mutex_enter(&net->net_cnt_lock);
		if (net->net_status & NFS4_EPHEMERAL_TREE_PROCESSING) {
			nfs4_ephemeral_tree_decr(net);
			mutex_exit(&net->net_cnt_lock);
			mutex_exit(&net->net_tree_lock);

			return (EIO);
		}
		mutex_exit(&net->net_cnt_lock);
	}

	/* announce the mount in progress; cleared in "done:" below */
	mutex_enter(&net->net_cnt_lock);
	net->net_status |= NFS4_EPHEMERAL_TREE_MOUNTING;
	mutex_exit(&net->net_cnt_lock);

	must_unlock = TRUE;

	error = nfs4_trigger_domount_args_create(vp, cr, &dma);
	if (error)
		goto done;

	/*
	 * Note that since we define mirror mounts to work
	 * for any user, we simply extend the privileges of
	 * the user's credentials to allow the mount to
	 * proceed.
	 */
	mcred = crdup(cr);
	if (mcred == NULL) {
		error = EINVAL;
		nfs4_trigger_domount_args_destroy(dma, vp);
		goto done;
	}

	crset_zone_privall(mcred);
	if (is_system_labeled())
		(void) setpflags(NET_MAC_AWARE, 1, mcred);

	error = nfs4_trigger_domount(vp, dma, &vfsp, mcred, newvpp);
	nfs4_trigger_domount_args_destroy(dma, vp);

	DTRACE_PROBE2(nfs4clnt__func__referral__mount,
	    vnode_t *, vp, int, error);

	crfree(mcred);

done:

	if (must_unlock) {
		mutex_enter(&net->net_cnt_lock);
		net->net_status &= ~NFS4_EPHEMERAL_TREE_MOUNTING;

		/*
		 * REFCNT: If we are the root of the tree, then we need
		 * to keep a reference because we malloced the tree and
		 * this is where we tied it to our mntinfo.
		 *
		 * If we are not the root of the tree, then our tie to
		 * the mntinfo occured elsewhere and we need to
		 * decrement the reference to the tree.
		 */
		if (is_building)
			net->net_status &= ~NFS4_EPHEMERAL_TREE_BUILDING;
		else
			nfs4_ephemeral_tree_decr(net);
		mutex_exit(&net->net_cnt_lock);

		mutex_exit(&net->net_tree_lock);
	}

	/* a "successful" mount that yielded no root vnode is an error */
	if (!error && (newvpp == NULL || *newvpp == NULL))
		error = ENOSYS;

	return (error);
}
897b9238976Sth 
/*
 * Collect together both the generic & mount-type specific args.
 *
 * Builds a domount_args_t in *dmap: the esi for the server we will
 * actually contact, a comma-separated hostlist, and a linked list of
 * nfs_args (one per responsive server, for RO failover).  Returns 0 on
 * success, EINVAL if esi creation fails for the current server, or
 * EINTR if a server ping is interrupted (all partial state is freed
 * before returning an error).
 */
static int
nfs4_trigger_domount_args_create(vnode_t *vp, cred_t *cr, domount_args_t **dmap)
{
	int nointr;
	char *hostlist;
	servinfo4_t *svp;
	struct nfs_args *nargs, *nargs_head;
	enum clnt_stat status;
	ephemeral_servinfo_t *esi, *esi_first;
	domount_args_t *dma;
	mntinfo4_t *mi = VTOMI4(vp);

	/* honour the parent mount's interruptibility setting */
	nointr = !(mi->mi_flags & MI4_INT);
	hostlist = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	svp = mi->mi_curr_serv;
	/* check if the current server is responding */
	status = nfs4_trigger_ping_server(svp, nointr);
	if (status == RPC_SUCCESS) {
		esi_first = nfs4_trigger_esi_create(vp, svp, cr);
		if (esi_first == NULL) {
			kmem_free(hostlist, MAXPATHLEN);
			return (EINVAL);
		}

		(void) strlcpy(hostlist, esi_first->esi_hostname, MAXPATHLEN);

		nargs_head = nfs4_trigger_nargs_create(mi, svp, esi_first);
	} else {
		/* current server did not respond */
		esi_first = NULL;
		nargs_head = NULL;
	}
	nargs = nargs_head;

	/*
	 * NFS RO failover.
	 *
	 * If we have multiple servinfo4 structures, linked via sv_next,
	 * we must create one nfs_args for each, linking the nfs_args via
	 * nfs_ext_u.nfs_extB.next.
	 *
	 * We need to build a corresponding esi for each, too, but that is
	 * used solely for building nfs_args, and may be immediately
	 * discarded, as domount() requires the info from just one esi,
	 * but all the nfs_args.
	 *
	 * Currently, the NFS mount code will hang if not all servers
	 * requested are available. To avoid that, we need to ping each
	 * server, here, and remove it from the list if it is not
	 * responding. This has the side-effect of that server then
	 * being permanently unavailable for this failover mount, even if
	 * it recovers. That's unfortunate, but the best we can do until
	 * the mount code path is fixed.
	 */

	/*
	 * If the current server was down, loop indefinitely until we find
	 * at least one responsive server.
	 */
	do {
		/* no locking needed for sv_next; it is only set at fs mount */
		for (svp = mi->mi_servers; svp != NULL; svp = svp->sv_next) {
			struct nfs_args *next;

			/*
			 * nargs_head: the head of the nfs_args list
			 * nargs: the current tail of the list
			 * next: the newly-created element to be added
			 */

			/*
			 * We've already tried the current server, above;
			 * if it was responding, we have already included it
			 * and it may now be ignored.
			 *
			 * Otherwise, try it again, since it may now have
			 * recovered.
			 */
			if (svp == mi->mi_curr_serv && esi_first != NULL)
				continue;

			(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
			if (svp->sv_flags & SV4_NOTINUSE) {
				nfs_rw_exit(&svp->sv_lock);
				continue;
			}
			nfs_rw_exit(&svp->sv_lock);

			/* check if the server is responding */
			status = nfs4_trigger_ping_server(svp, nointr);
			if (status == RPC_INTR) {
				/*
				 * Interrupted: tear down everything built
				 * so far and bail out with EINTR.
				 */
				kmem_free(hostlist, MAXPATHLEN);
				nfs4_trigger_esi_destroy(esi_first, vp);
				nargs = nargs_head;
				while (nargs != NULL) {
					next = nargs->nfs_ext_u.nfs_extB.next;
					nfs4_trigger_nargs_destroy(nargs);
					nargs = next;
				}
				return (EINTR);
			} else if (status != RPC_SUCCESS) {
				/* if the server did not respond, ignore it */
				continue;
			}

			esi = nfs4_trigger_esi_create(vp, svp, cr);
			if (esi == NULL)
				continue;

			/*
			 * If the original current server (mi_curr_serv)
			 * was down when when we first tried it,
			 * (i.e. esi_first == NULL),
			 * we select this new server (svp) to be the server
			 * that we will actually contact (esi_first).
			 *
			 * Note that it's possible that mi_curr_serv == svp,
			 * if that mi_curr_serv was down but has now recovered.
			 */
			next = nfs4_trigger_nargs_create(mi, svp, esi);
			if (esi_first == NULL) {
				ASSERT(nargs == NULL);
				ASSERT(nargs_head == NULL);
				nargs_head = next;
				esi_first = esi;
				(void) strlcpy(hostlist,
				    esi_first->esi_hostname, MAXPATHLEN);
			} else {
				ASSERT(nargs_head != NULL);
				nargs->nfs_ext_u.nfs_extB.next = next;
				(void) strlcat(hostlist, ",", MAXPATHLEN);
				(void) strlcat(hostlist, esi->esi_hostname,
				    MAXPATHLEN);
				/* esi was only needed for hostname & nargs */
				nfs4_trigger_esi_destroy(esi, vp);
			}

			nargs = next;
		}

		/* if we've had no response at all, wait a second */
		if (esi_first == NULL)
			delay(drv_usectohz(1000000));

	} while (esi_first == NULL);
	ASSERT(nargs_head != NULL);

	dma = kmem_zalloc(sizeof (domount_args_t), KM_SLEEP);
	dma->dma_esi = esi_first;
	dma->dma_hostlist = hostlist;
	dma->dma_nargs = nargs_head;
	*dmap = dma;

	return (0);
}
1057b9238976Sth 
1058b9238976Sth static void
1059b9238976Sth nfs4_trigger_domount_args_destroy(domount_args_t *dma, vnode_t *vp)
1060b9238976Sth {
1061b9238976Sth 	if (dma != NULL) {
1062b9238976Sth 		if (dma->dma_esi != NULL && vp != NULL)
1063b9238976Sth 			nfs4_trigger_esi_destroy(dma->dma_esi, vp);
1064b9238976Sth 
1065b9238976Sth 		if (dma->dma_hostlist != NULL)
1066b9238976Sth 			kmem_free(dma->dma_hostlist, MAXPATHLEN);
1067b9238976Sth 
1068b9238976Sth 		if (dma->dma_nargs != NULL) {
1069b9238976Sth 			struct nfs_args *nargs = dma->dma_nargs;
1070b9238976Sth 
1071b9238976Sth 			do {
1072b9238976Sth 				struct nfs_args *next =
1073b9238976Sth 				    nargs->nfs_ext_u.nfs_extB.next;
1074b9238976Sth 
1075b9238976Sth 				nfs4_trigger_nargs_destroy(nargs);
1076b9238976Sth 				nargs = next;
1077b9238976Sth 			} while (nargs != NULL);
1078b9238976Sth 		}
1079b9238976Sth 
1080b9238976Sth 		kmem_free(dma, sizeof (domount_args_t));
1081b9238976Sth 	}
1082b9238976Sth }
1083b9238976Sth 
1084b9238976Sth /*
1085b9238976Sth  * The ephemeral_servinfo_t struct contains basic information we will need to
1086b9238976Sth  * perform the mount. Whilst the structure is generic across different
1087b9238976Sth  * types of ephemeral mount, the way we gather its contents differs.
1088b9238976Sth  */
1089b9238976Sth static ephemeral_servinfo_t *
10902f172c55SRobert Thurlow nfs4_trigger_esi_create(vnode_t *vp, servinfo4_t *svp, cred_t *cr)
1091b9238976Sth {
1092b9238976Sth 	ephemeral_servinfo_t *esi;
1093b9238976Sth 	rnode4_t *rp = VTOR4(vp);
1094b9238976Sth 
1095b9238976Sth 	ASSERT(RP_ISSTUB(rp));
1096b9238976Sth 
1097b9238976Sth 	/* Call the ephemeral type-specific routine */
1098b9238976Sth 	if (RP_ISSTUB_MIRRORMOUNT(rp))
1099b9238976Sth 		esi = nfs4_trigger_esi_create_mirrormount(vp, svp);
11002f172c55SRobert Thurlow 	else if (RP_ISSTUB_REFERRAL(rp))
11012f172c55SRobert Thurlow 		esi = nfs4_trigger_esi_create_referral(vp, cr);
1102b9238976Sth 	else
1103b9238976Sth 		esi = NULL;
1104b9238976Sth 	return (esi);
1105b9238976Sth }
1106b9238976Sth 
1107b9238976Sth static void
1108b9238976Sth nfs4_trigger_esi_destroy(ephemeral_servinfo_t *esi, vnode_t *vp)
1109b9238976Sth {
1110b9238976Sth 	rnode4_t *rp = VTOR4(vp);
1111b9238976Sth 
1112b9238976Sth 	ASSERT(RP_ISSTUB(rp));
1113b9238976Sth 
1114b9238976Sth 	/* Currently, no need for an ephemeral type-specific routine */
1115b9238976Sth 
1116b9238976Sth 	/*
1117b9238976Sth 	 * The contents of ephemeral_servinfo_t goes into nfs_args,
1118b9238976Sth 	 * and will be handled by nfs4_trigger_nargs_destroy().
1119b9238976Sth 	 * We need only free the structure itself.
1120b9238976Sth 	 */
1121b9238976Sth 	if (esi != NULL)
1122b9238976Sth 		kmem_free(esi, sizeof (ephemeral_servinfo_t));
1123b9238976Sth }
1124b9238976Sth 
/*
 * Some of this may turn out to be common with other ephemeral types,
 * in which case it should be moved to nfs4_trigger_esi_create(), or a
 * common function called.
 */

/*
 * Mirror mounts case - should have all data available
 *
 * Builds an ephemeral_servinfo_t by deep-copying the relevant fields
 * from the stub rnode's servinfo4 (hostname, address, knetconfig,
 * optional AUTH_DH syncaddr/netname) and by composing the server-side
 * path from svp->sv_path plus the stub's fn_path.  All allocations are
 * KM_SLEEP, so this cannot fail; the caller frees via
 * nfs4_trigger_esi_destroy()/nfs4_trigger_nargs_destroy().
 */
static ephemeral_servinfo_t *
nfs4_trigger_esi_create_mirrormount(vnode_t *vp, servinfo4_t *svp)
{
	char			*stubpath;
	struct knetconfig	*sikncp, *svkncp;
	struct netbuf		*bufp;
	ephemeral_servinfo_t	*esi;

	esi = kmem_zalloc(sizeof (ephemeral_servinfo_t), KM_SLEEP);

	/* initially set to be our type of ephemeral mount; may be added to */
	esi->esi_mount_flags = NFSMNT_MIRRORMOUNT;

	/*
	 * We're copying info from the stub rnode's servinfo4, but
	 * we must create new copies, not pointers, since this information
	 * is to be associated with the new mount, which will be
	 * unmounted (and its structures freed) separately
	 */

	/*
	 * Sizes passed to kmem_[z]alloc here must match those freed
	 * in nfs4_free_args()
	 */

	/*
	 * We hold sv_lock across kmem_zalloc() calls that may sleep, but this
	 * is difficult to avoid: as we need to read svp to calculate the
	 * sizes to be allocated.
	 */
	(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);

	/* NUL-terminated copy of the server's hostname */
	esi->esi_hostname = kmem_zalloc(strlen(svp->sv_hostname) + 1, KM_SLEEP);
	(void) strcat(esi->esi_hostname, svp->sv_hostname);

	/* deep copy of the server's transport address */
	esi->esi_addr = kmem_zalloc(sizeof (struct netbuf), KM_SLEEP);
	bufp = esi->esi_addr;
	bufp->len = svp->sv_addr.len;
	bufp->maxlen = svp->sv_addr.maxlen;
	bufp->buf = kmem_zalloc(bufp->len, KM_SLEEP);
	bcopy(svp->sv_addr.buf, bufp->buf, bufp->len);

	/* deep copy of the knetconfig, including its two strings */
	esi->esi_knconf = kmem_zalloc(sizeof (*esi->esi_knconf), KM_SLEEP);
	sikncp = esi->esi_knconf;
	svkncp = svp->sv_knconf;
	sikncp->knc_semantics = svkncp->knc_semantics;
	sikncp->knc_protofmly = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
	(void) strcat((char *)sikncp->knc_protofmly,
	    (char *)svkncp->knc_protofmly);
	sikncp->knc_proto = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
	(void) strcat((char *)sikncp->knc_proto, (char *)svkncp->knc_proto);
	sikncp->knc_rdev = svkncp->knc_rdev;

	/*
	 * Used when AUTH_DH is negotiated.
	 *
	 * This is ephemeral mount-type specific, since it contains the
	 * server's time-sync syncaddr.
	 */
	if (svp->sv_dhsec) {
		struct netbuf *bufp;
		sec_data_t *sdata;
		dh_k4_clntdata_t *data;

		sdata = svp->sv_dhsec;
		data = (dh_k4_clntdata_t *)sdata->data;
		ASSERT(sdata->rpcflavor == AUTH_DH);

		bufp = kmem_zalloc(sizeof (struct netbuf), KM_SLEEP);
		bufp->len = data->syncaddr.len;
		bufp->maxlen = data->syncaddr.maxlen;
		bufp->buf = kmem_zalloc(bufp->len, KM_SLEEP);
		bcopy(data->syncaddr.buf, bufp->buf, bufp->len);
		esi->esi_syncaddr = bufp;

		if (data->netname != NULL) {
			int nmlen = data->netnamelen;

			/*
			 * We need to copy from a dh_k4_clntdata_t
			 * netname/netnamelen pair to a NUL-terminated
			 * netname string suitable for putting in nfs_args,
			 * where the latter has no netnamelen field.
			 */
			esi->esi_netname = kmem_zalloc(nmlen + 1, KM_SLEEP);
			bcopy(data->netname, esi->esi_netname, nmlen);
		}
	} else {
		esi->esi_syncaddr = NULL;
		esi->esi_netname = NULL;
	}

	stubpath = fn_path(VTOSV(vp)->sv_name);
	/* step over initial '.', to avoid e.g. sv_path: "/tank./ws" */
	ASSERT(*stubpath == '.');
	stubpath += 1;

	/* for nfs_args->fh */
	esi->esi_path_len = strlen(stubpath) + 1;
	/* only prepend sv_path when it is not the root, to avoid "//" */
	if (strcmp(svp->sv_path, "/") != 0)
		esi->esi_path_len += strlen(svp->sv_path);
	esi->esi_path = kmem_zalloc(esi->esi_path_len, KM_SLEEP);
	if (strcmp(svp->sv_path, "/") != 0)
		(void) strcat(esi->esi_path, svp->sv_path);
	(void) strcat(esi->esi_path, stubpath);

	/* restore the pointer fn_path() returned before freeing it */
	stubpath -= 1;
	/* stubpath allocated by fn_path() */
	kmem_free(stubpath, strlen(stubpath) + 1);

	nfs_rw_exit(&svp->sv_lock);

	return (esi);
}
1248b9238976Sth 
12492f172c55SRobert Thurlow /*
12502f172c55SRobert Thurlow  * Makes an upcall to NFSMAPID daemon to resolve hostname of NFS server to
12512f172c55SRobert Thurlow  * get network information required to do the mount call.
12522f172c55SRobert Thurlow  */
12532f172c55SRobert Thurlow int
12542f172c55SRobert Thurlow nfs4_callmapid(utf8string *server, struct nfs_fsl_info *resp)
12552f172c55SRobert Thurlow {
12562f172c55SRobert Thurlow 	door_arg_t	door_args;
12572f172c55SRobert Thurlow 	door_handle_t	dh;
12582f172c55SRobert Thurlow 	XDR		xdr;
12592f172c55SRobert Thurlow 	refd_door_args_t *xdr_argsp;
12602f172c55SRobert Thurlow 	refd_door_res_t  *orig_resp;
12612f172c55SRobert Thurlow 	k_sigset_t	smask;
12622f172c55SRobert Thurlow 	int		xdr_len = 0;
12632f172c55SRobert Thurlow 	int 		res_len = 16; /* length of an ip adress */
12642f172c55SRobert Thurlow 	int		orig_reslen = res_len;
12652f172c55SRobert Thurlow 	int		error = 0;
12662f172c55SRobert Thurlow 	struct nfsidmap_globals *nig;
12672f172c55SRobert Thurlow 
12682f172c55SRobert Thurlow 	if (zone_status_get(curproc->p_zone) >= ZONE_IS_SHUTTING_DOWN)
12692f172c55SRobert Thurlow 		return (ECONNREFUSED);
12702f172c55SRobert Thurlow 
12712f172c55SRobert Thurlow 	nig = zone_getspecific(nfsidmap_zone_key, nfs_zone());
12722f172c55SRobert Thurlow 	ASSERT(nig != NULL);
12732f172c55SRobert Thurlow 
12742f172c55SRobert Thurlow 	mutex_enter(&nig->nfsidmap_daemon_lock);
12752f172c55SRobert Thurlow 	dh = nig->nfsidmap_daemon_dh;
12762f172c55SRobert Thurlow 	if (dh == NULL) {
12772f172c55SRobert Thurlow 		mutex_exit(&nig->nfsidmap_daemon_lock);
12782f172c55SRobert Thurlow 		cmn_err(CE_NOTE,
12792f172c55SRobert Thurlow 		    "nfs4_callmapid: nfsmapid daemon not " \
12802f172c55SRobert Thurlow 		    "running unable to resolve host name\n");
12812f172c55SRobert Thurlow 		return (EINVAL);
12822f172c55SRobert Thurlow 	}
12832f172c55SRobert Thurlow 	door_ki_hold(dh);
12842f172c55SRobert Thurlow 	mutex_exit(&nig->nfsidmap_daemon_lock);
12852f172c55SRobert Thurlow 
12862f172c55SRobert Thurlow 	xdr_len = xdr_sizeof(&(xdr_utf8string), server);
12872f172c55SRobert Thurlow 
12882f172c55SRobert Thurlow 	xdr_argsp = kmem_zalloc(xdr_len + sizeof (*xdr_argsp), KM_SLEEP);
12892f172c55SRobert Thurlow 	xdr_argsp->xdr_len = xdr_len;
12902f172c55SRobert Thurlow 	xdr_argsp->cmd = NFSMAPID_SRV_NETINFO;
12912f172c55SRobert Thurlow 
12922f172c55SRobert Thurlow 	xdrmem_create(&xdr, (char *)&xdr_argsp->xdr_arg,
12932f172c55SRobert Thurlow 	    xdr_len, XDR_ENCODE);
12942f172c55SRobert Thurlow 
12952f172c55SRobert Thurlow 	if (!xdr_utf8string(&xdr, server)) {
12962f172c55SRobert Thurlow 		kmem_free(xdr_argsp, xdr_len + sizeof (*xdr_argsp));
12972f172c55SRobert Thurlow 		door_ki_rele(dh);
12982f172c55SRobert Thurlow 		return (1);
12992f172c55SRobert Thurlow 	}
13002f172c55SRobert Thurlow 
13012f172c55SRobert Thurlow 	if (orig_reslen)
13022f172c55SRobert Thurlow 		orig_resp = kmem_alloc(orig_reslen, KM_SLEEP);
13032f172c55SRobert Thurlow 
13042f172c55SRobert Thurlow 	door_args.data_ptr = (char *)xdr_argsp;
13052f172c55SRobert Thurlow 	door_args.data_size = sizeof (*xdr_argsp) + xdr_argsp->xdr_len;
13062f172c55SRobert Thurlow 	door_args.desc_ptr = NULL;
13072f172c55SRobert Thurlow 	door_args.desc_num = 0;
13082f172c55SRobert Thurlow 	door_args.rbuf = orig_resp ? (char *)orig_resp : NULL;
13092f172c55SRobert Thurlow 	door_args.rsize = res_len;
13102f172c55SRobert Thurlow 
13112f172c55SRobert Thurlow 	sigintr(&smask, 1);
13122f172c55SRobert Thurlow 	error = door_ki_upcall(dh, &door_args);
13132f172c55SRobert Thurlow 	sigunintr(&smask);
13142f172c55SRobert Thurlow 
13152f172c55SRobert Thurlow 	door_ki_rele(dh);
13162f172c55SRobert Thurlow 
13172f172c55SRobert Thurlow 	kmem_free(xdr_argsp, xdr_len + sizeof (*xdr_argsp));
13182f172c55SRobert Thurlow 	if (error) {
13192f172c55SRobert Thurlow 		kmem_free(orig_resp, orig_reslen);
13202f172c55SRobert Thurlow 		/*
13212f172c55SRobert Thurlow 		 * There is no door to connect to. The referral daemon
13222f172c55SRobert Thurlow 		 * must not be running yet.
13232f172c55SRobert Thurlow 		 */
13242f172c55SRobert Thurlow 		cmn_err(CE_WARN,
13252f172c55SRobert Thurlow 		    "nfsmapid not running cannot resolve host name");
13262f172c55SRobert Thurlow 		goto out;
13272f172c55SRobert Thurlow 	}
13282f172c55SRobert Thurlow 
13292f172c55SRobert Thurlow 	/*
13302f172c55SRobert Thurlow 	 * If the results buffer passed back are not the same as
13312f172c55SRobert Thurlow 	 * what was sent free the old buffer and use the new one.
13322f172c55SRobert Thurlow 	 */
13332f172c55SRobert Thurlow 	if (orig_resp && orig_reslen) {
13342f172c55SRobert Thurlow 		refd_door_res_t *door_resp;
13352f172c55SRobert Thurlow 
13362f172c55SRobert Thurlow 		door_resp = (refd_door_res_t *)door_args.rbuf;
13372f172c55SRobert Thurlow 		if ((void *)door_args.rbuf != orig_resp)
13382f172c55SRobert Thurlow 			kmem_free(orig_resp, orig_reslen);
13392f172c55SRobert Thurlow 		if (door_resp->res_status == 0) {
13402f172c55SRobert Thurlow 			xdrmem_create(&xdr, (char *)&door_resp->xdr_res,
13412f172c55SRobert Thurlow 			    door_resp->xdr_len, XDR_DECODE);
13422f172c55SRobert Thurlow 			bzero(resp, sizeof (struct nfs_fsl_info));
13432f172c55SRobert Thurlow 			if (!xdr_nfs_fsl_info(&xdr, resp)) {
13442f172c55SRobert Thurlow 				DTRACE_PROBE2(
13452f172c55SRobert Thurlow 				    nfs4clnt__debug__referral__upcall__xdrfail,
13462f172c55SRobert Thurlow 				    struct nfs_fsl_info *, resp,
13472f172c55SRobert Thurlow 				    char *, "nfs4_callmapid");
13482f172c55SRobert Thurlow 				error = EINVAL;
13492f172c55SRobert Thurlow 			}
13502f172c55SRobert Thurlow 		} else {
13512f172c55SRobert Thurlow 			DTRACE_PROBE2(
13522f172c55SRobert Thurlow 			    nfs4clnt__debug__referral__upcall__badstatus,
13532f172c55SRobert Thurlow 			    int, door_resp->res_status,
13542f172c55SRobert Thurlow 			    char *, "nfs4_callmapid");
13552f172c55SRobert Thurlow 			error = door_resp->res_status;
13562f172c55SRobert Thurlow 		}
13572f172c55SRobert Thurlow 		kmem_free(door_args.rbuf, door_args.rsize);
13582f172c55SRobert Thurlow 	}
13592f172c55SRobert Thurlow out:
13602f172c55SRobert Thurlow 	DTRACE_PROBE2(nfs4clnt__func__referral__upcall,
13612f172c55SRobert Thurlow 	    char *, server, int, error);
13622f172c55SRobert Thurlow 	return (error);
13632f172c55SRobert Thurlow }
13642f172c55SRobert Thurlow 
13652f172c55SRobert Thurlow /*
13662f172c55SRobert Thurlow  * Fetches the fs_locations attribute. Typically called
13672f172c55SRobert Thurlow  * from a Replication/Migration/Referrals/Mirror-mount context
13682f172c55SRobert Thurlow  *
13692f172c55SRobert Thurlow  * Fills in the attributes in garp. The caller is assumed
13702f172c55SRobert Thurlow  * to have allocated memory for garp.
13712f172c55SRobert Thurlow  *
13722f172c55SRobert Thurlow  * lock: if set do not lock s_recovlock and mi_recovlock mutex,
13732f172c55SRobert Thurlow  *	 it's already done by caller. Otherwise lock these mutexes
13742f172c55SRobert Thurlow  *	 before doing the rfs4call().
13752f172c55SRobert Thurlow  *
13762f172c55SRobert Thurlow  * Returns
13772f172c55SRobert Thurlow  * 	1	 for success
13782f172c55SRobert Thurlow  * 	0	 for failure
13792f172c55SRobert Thurlow  */
13802f172c55SRobert Thurlow int
13812f172c55SRobert Thurlow nfs4_fetch_locations(mntinfo4_t *mi, nfs4_sharedfh_t *sfh, char *nm,
13822f172c55SRobert Thurlow     cred_t *cr, nfs4_ga_res_t *garp, COMPOUND4res_clnt *callres, bool_t lock)
13832f172c55SRobert Thurlow {
13842f172c55SRobert Thurlow 	COMPOUND4args_clnt args;
13852f172c55SRobert Thurlow 	COMPOUND4res_clnt res;
13862f172c55SRobert Thurlow 	nfs_argop4 *argop;
13872f172c55SRobert Thurlow 	int argoplist_size = 3 * sizeof (nfs_argop4);
13882f172c55SRobert Thurlow 	nfs4_server_t *sp = NULL;
13892f172c55SRobert Thurlow 	int doqueue = 1;
13902f172c55SRobert Thurlow 	nfs4_error_t e = { 0, NFS4_OK, RPC_SUCCESS };
13912f172c55SRobert Thurlow 	int retval = 1;
13922f172c55SRobert Thurlow 	struct nfs4_clnt *nfscl;
13932f172c55SRobert Thurlow 
13942f172c55SRobert Thurlow 	if (lock == TRUE)
13952f172c55SRobert Thurlow 		(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);
13962f172c55SRobert Thurlow 	else
13972f172c55SRobert Thurlow 		ASSERT(nfs_rw_lock_held(&mi->mi_recovlock, RW_READER) ||
13982f172c55SRobert Thurlow 		    nfs_rw_lock_held(&mi->mi_recovlock, RW_WRITER));
13992f172c55SRobert Thurlow 
14002f172c55SRobert Thurlow 	sp = find_nfs4_server(mi);
14012f172c55SRobert Thurlow 	if (lock == TRUE)
14022f172c55SRobert Thurlow 		nfs_rw_exit(&mi->mi_recovlock);
14032f172c55SRobert Thurlow 
14042f172c55SRobert Thurlow 	if (sp != NULL)
14052f172c55SRobert Thurlow 		mutex_exit(&sp->s_lock);
14062f172c55SRobert Thurlow 
14072f172c55SRobert Thurlow 	if (lock == TRUE) {
14082f172c55SRobert Thurlow 		if (sp != NULL)
14092f172c55SRobert Thurlow 			(void) nfs_rw_enter_sig(&sp->s_recovlock,
14102f172c55SRobert Thurlow 			    RW_WRITER, 0);
14112f172c55SRobert Thurlow 		(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_WRITER, 0);
14122f172c55SRobert Thurlow 	} else {
14132f172c55SRobert Thurlow 		if (sp != NULL) {
14142f172c55SRobert Thurlow 			ASSERT(nfs_rw_lock_held(&sp->s_recovlock, RW_READER) ||
14152f172c55SRobert Thurlow 			    nfs_rw_lock_held(&sp->s_recovlock, RW_WRITER));
14162f172c55SRobert Thurlow 		}
14172f172c55SRobert Thurlow 	}
14182f172c55SRobert Thurlow 
14192f172c55SRobert Thurlow 	/*
14202f172c55SRobert Thurlow 	 * Do we want to do the setup for recovery here?
14212f172c55SRobert Thurlow 	 *
14222f172c55SRobert Thurlow 	 * We know that the server responded to a null ping a very
14232f172c55SRobert Thurlow 	 * short time ago, and we know that we intend to do a
14242f172c55SRobert Thurlow 	 * single stateless operation - we want to fetch attributes,
14252f172c55SRobert Thurlow 	 * so we know we can't encounter errors about state.  If
14262f172c55SRobert Thurlow 	 * something goes wrong with the GETATTR, like not being
14272f172c55SRobert Thurlow 	 * able to get a response from the server or getting any
14282f172c55SRobert Thurlow 	 * kind of FH error, we should fail the mount.
14292f172c55SRobert Thurlow 	 *
14302f172c55SRobert Thurlow 	 * We may want to re-visited this at a later time.
14312f172c55SRobert Thurlow 	 */
14322f172c55SRobert Thurlow 	argop = kmem_alloc(argoplist_size, KM_SLEEP);
14332f172c55SRobert Thurlow 
14342f172c55SRobert Thurlow 	args.ctag = TAG_GETATTR_FSLOCATION;
14352f172c55SRobert Thurlow 	/* PUTFH LOOKUP GETATTR */
14362f172c55SRobert Thurlow 	args.array_len = 3;
14372f172c55SRobert Thurlow 	args.array = argop;
14382f172c55SRobert Thurlow 
14392f172c55SRobert Thurlow 	/* 0. putfh file */
14402f172c55SRobert Thurlow 	argop[0].argop = OP_CPUTFH;
14412f172c55SRobert Thurlow 	argop[0].nfs_argop4_u.opcputfh.sfh = sfh;
14422f172c55SRobert Thurlow 
14432f172c55SRobert Thurlow 	/* 1. lookup name, can't be dotdot */
14442f172c55SRobert Thurlow 	argop[1].argop = OP_CLOOKUP;
14452f172c55SRobert Thurlow 	argop[1].nfs_argop4_u.opclookup.cname = nm;
14462f172c55SRobert Thurlow 
14472f172c55SRobert Thurlow 	/* 2. file attrs */
14482f172c55SRobert Thurlow 	argop[2].argop = OP_GETATTR;
14492f172c55SRobert Thurlow 	argop[2].nfs_argop4_u.opgetattr.attr_request =
14502f172c55SRobert Thurlow 	    FATTR4_FSID_MASK | FATTR4_FS_LOCATIONS_MASK |
14512f172c55SRobert Thurlow 	    FATTR4_MOUNTED_ON_FILEID_MASK;
14522f172c55SRobert Thurlow 	argop[2].nfs_argop4_u.opgetattr.mi = mi;
14532f172c55SRobert Thurlow 
14542f172c55SRobert Thurlow 	rfs4call(mi, &args, &res, cr, &doqueue, 0, &e);
14552f172c55SRobert Thurlow 
14562f172c55SRobert Thurlow 	if (lock == TRUE) {
14572f172c55SRobert Thurlow 		nfs_rw_exit(&mi->mi_recovlock);
14582f172c55SRobert Thurlow 		if (sp != NULL)
14592f172c55SRobert Thurlow 			nfs_rw_exit(&sp->s_recovlock);
14602f172c55SRobert Thurlow 	}
14612f172c55SRobert Thurlow 
14622f172c55SRobert Thurlow 	nfscl = zone_getspecific(nfs4clnt_zone_key, nfs_zone());
14632f172c55SRobert Thurlow 	nfscl->nfscl_stat.referrals.value.ui64++;
14642f172c55SRobert Thurlow 	DTRACE_PROBE3(nfs4clnt__func__referral__fsloc,
14652f172c55SRobert Thurlow 	    nfs4_sharedfh_t *, sfh, char *, nm, nfs4_error_t *, &e);
14662f172c55SRobert Thurlow 
14672f172c55SRobert Thurlow 	if (e.error != 0) {
14682f172c55SRobert Thurlow 		if (sp != NULL)
14692f172c55SRobert Thurlow 			nfs4_server_rele(sp);
14702f172c55SRobert Thurlow 		kmem_free(argop, argoplist_size);
14712f172c55SRobert Thurlow 		return (0);
14722f172c55SRobert Thurlow 	}
14732f172c55SRobert Thurlow 
14742f172c55SRobert Thurlow 	/*
14752f172c55SRobert Thurlow 	 * Check for all possible error conditions.
14762f172c55SRobert Thurlow 	 * For valid replies without an ops array or for illegal
14772f172c55SRobert Thurlow 	 * replies, return a failure.
14782f172c55SRobert Thurlow 	 */
14792f172c55SRobert Thurlow 	if (res.status != NFS4_OK || res.array_len < 3 ||
14802f172c55SRobert Thurlow 	    res.array[2].nfs_resop4_u.opgetattr.status != NFS4_OK) {
14812f172c55SRobert Thurlow 		retval = 0;
14822f172c55SRobert Thurlow 		goto exit;
14832f172c55SRobert Thurlow 	}
14842f172c55SRobert Thurlow 
14852f172c55SRobert Thurlow 	/*
14862f172c55SRobert Thurlow 	 * There isn't much value in putting the attributes
14872f172c55SRobert Thurlow 	 * in the attr cache since fs_locations4 aren't
14882f172c55SRobert Thurlow 	 * encountered very frequently, so just make them
14892f172c55SRobert Thurlow 	 * available to the caller.
14902f172c55SRobert Thurlow 	 */
14912f172c55SRobert Thurlow 	*garp = res.array[2].nfs_resop4_u.opgetattr.ga_res;
14922f172c55SRobert Thurlow 
14932f172c55SRobert Thurlow 	DTRACE_PROBE2(nfs4clnt__debug__referral__fsloc,
14942f172c55SRobert Thurlow 	    nfs4_ga_res_t *, garp, char *, "nfs4_fetch_locations");
14952f172c55SRobert Thurlow 
14962f172c55SRobert Thurlow 	/* No fs_locations? -- return a failure */
14972f172c55SRobert Thurlow 	if (garp->n4g_ext_res == NULL ||
14982f172c55SRobert Thurlow 	    garp->n4g_ext_res->n4g_fslocations.locations_val == NULL) {
14992f172c55SRobert Thurlow 		retval = 0;
15002f172c55SRobert Thurlow 		goto exit;
15012f172c55SRobert Thurlow 	}
15022f172c55SRobert Thurlow 
15032f172c55SRobert Thurlow 	if (!garp->n4g_fsid_valid)
15042f172c55SRobert Thurlow 		retval = 0;
15052f172c55SRobert Thurlow 
15062f172c55SRobert Thurlow exit:
15072f172c55SRobert Thurlow 	if (retval == 0) {
15082f172c55SRobert Thurlow 		/* the call was ok but failed validating the call results */
1509*a17ce845SMarcel Telka 		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&res);
15102f172c55SRobert Thurlow 	} else {
15112f172c55SRobert Thurlow 		ASSERT(callres != NULL);
15122f172c55SRobert Thurlow 		*callres = res;
15132f172c55SRobert Thurlow 	}
15142f172c55SRobert Thurlow 
15152f172c55SRobert Thurlow 	if (sp != NULL)
15162f172c55SRobert Thurlow 		nfs4_server_rele(sp);
15172f172c55SRobert Thurlow 	kmem_free(argop, argoplist_size);
15182f172c55SRobert Thurlow 	return (retval);
15192f172c55SRobert Thurlow }
15202f172c55SRobert Thurlow 
15212f172c55SRobert Thurlow /* tunable to disable referral mounts */
15222f172c55SRobert Thurlow int nfs4_no_referrals = 0;
15232f172c55SRobert Thurlow 
15242f172c55SRobert Thurlow /*
15252f172c55SRobert Thurlow  * Returns NULL if the vnode cannot be created or found.
15262f172c55SRobert Thurlow  */
15272f172c55SRobert Thurlow vnode_t *
15282f172c55SRobert Thurlow find_referral_stubvp(vnode_t *dvp, char *nm, cred_t *cr)
15292f172c55SRobert Thurlow {
15302f172c55SRobert Thurlow 	nfs_fh4 *stub_fh, *dfh;
15312f172c55SRobert Thurlow 	nfs4_sharedfh_t *sfhp;
15322f172c55SRobert Thurlow 	char *newfhval;
15332f172c55SRobert Thurlow 	vnode_t *vp = NULL;
15342f172c55SRobert Thurlow 	fattr4_mounted_on_fileid mnt_on_fileid;
15352f172c55SRobert Thurlow 	nfs4_ga_res_t garp;
15362f172c55SRobert Thurlow 	mntinfo4_t *mi;
15372f172c55SRobert Thurlow 	COMPOUND4res_clnt callres;
15382f172c55SRobert Thurlow 	hrtime_t t;
15392f172c55SRobert Thurlow 
15402f172c55SRobert Thurlow 	if (nfs4_no_referrals)
15412f172c55SRobert Thurlow 		return (NULL);
15422f172c55SRobert Thurlow 
15432f172c55SRobert Thurlow 	/*
15442f172c55SRobert Thurlow 	 * Get the mounted_on_fileid, unique on that server::fsid
15452f172c55SRobert Thurlow 	 */
15462f172c55SRobert Thurlow 	mi = VTOMI4(dvp);
15472f172c55SRobert Thurlow 	if (nfs4_fetch_locations(mi, VTOR4(dvp)->r_fh, nm, cr,
15482f172c55SRobert Thurlow 	    &garp, &callres, FALSE) == 0)
15492f172c55SRobert Thurlow 		return (NULL);
15502f172c55SRobert Thurlow 	mnt_on_fileid = garp.n4g_mon_fid;
1551*a17ce845SMarcel Telka 	xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&callres);
15522f172c55SRobert Thurlow 
15532f172c55SRobert Thurlow 	/*
15542f172c55SRobert Thurlow 	 * Build a fake filehandle from the dir FH and the mounted_on_fileid
15552f172c55SRobert Thurlow 	 */
15562f172c55SRobert Thurlow 	dfh = &VTOR4(dvp)->r_fh->sfh_fh;
15572f172c55SRobert Thurlow 	stub_fh = kmem_alloc(sizeof (nfs_fh4), KM_SLEEP);
15582f172c55SRobert Thurlow 	stub_fh->nfs_fh4_val = kmem_alloc(dfh->nfs_fh4_len +
15592f172c55SRobert Thurlow 	    sizeof (fattr4_mounted_on_fileid), KM_SLEEP);
15602f172c55SRobert Thurlow 	newfhval = stub_fh->nfs_fh4_val;
15612f172c55SRobert Thurlow 
15622f172c55SRobert Thurlow 	/* copy directory's file handle */
15632f172c55SRobert Thurlow 	bcopy(dfh->nfs_fh4_val, newfhval, dfh->nfs_fh4_len);
15642f172c55SRobert Thurlow 	stub_fh->nfs_fh4_len = dfh->nfs_fh4_len;
15652f172c55SRobert Thurlow 	newfhval = newfhval + dfh->nfs_fh4_len;
15662f172c55SRobert Thurlow 
15672f172c55SRobert Thurlow 	/* Add mounted_on_fileid. Use bcopy to avoid alignment problem */
15682f172c55SRobert Thurlow 	bcopy((char *)&mnt_on_fileid, newfhval,
15692f172c55SRobert Thurlow 	    sizeof (fattr4_mounted_on_fileid));
15702f172c55SRobert Thurlow 	stub_fh->nfs_fh4_len += sizeof (fattr4_mounted_on_fileid);
15712f172c55SRobert Thurlow 
15722f172c55SRobert Thurlow 	sfhp = sfh4_put(stub_fh, VTOMI4(dvp), NULL);
15732f172c55SRobert Thurlow 	kmem_free(stub_fh->nfs_fh4_val, dfh->nfs_fh4_len +
15742f172c55SRobert Thurlow 	    sizeof (fattr4_mounted_on_fileid));
15752f172c55SRobert Thurlow 	kmem_free(stub_fh, sizeof (nfs_fh4));
15762f172c55SRobert Thurlow 	if (sfhp == NULL)
15772f172c55SRobert Thurlow 		return (NULL);
15782f172c55SRobert Thurlow 
15792f172c55SRobert Thurlow 	t = gethrtime();
15802f172c55SRobert Thurlow 	garp.n4g_va.va_type = VDIR;
15812f172c55SRobert Thurlow 	vp = makenfs4node(sfhp, NULL, dvp->v_vfsp, t,
15822f172c55SRobert Thurlow 	    cr, dvp, fn_get(VTOSV(dvp)->sv_name, nm, sfhp));
15832f172c55SRobert Thurlow 
15842f172c55SRobert Thurlow 	if (vp != NULL)
15852f172c55SRobert Thurlow 		vp->v_type = VDIR;
15862f172c55SRobert Thurlow 
15872f172c55SRobert Thurlow 	sfh4_rele(&sfhp);
15882f172c55SRobert Thurlow 	return (vp);
15892f172c55SRobert Thurlow }
15902f172c55SRobert Thurlow 
15912f172c55SRobert Thurlow int
15922f172c55SRobert Thurlow nfs4_setup_referral(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr)
15932f172c55SRobert Thurlow {
15942f172c55SRobert Thurlow 	vnode_t *nvp;
15952f172c55SRobert Thurlow 	rnode4_t *rp;
15962f172c55SRobert Thurlow 
15972f172c55SRobert Thurlow 	if ((nvp = find_referral_stubvp(dvp, nm, cr)) == NULL)
15982f172c55SRobert Thurlow 		return (EINVAL);
15992f172c55SRobert Thurlow 
16002f172c55SRobert Thurlow 	rp = VTOR4(nvp);
16012f172c55SRobert Thurlow 	mutex_enter(&rp->r_statelock);
16022f172c55SRobert Thurlow 	r4_stub_referral(rp);
16032f172c55SRobert Thurlow 	mutex_exit(&rp->r_statelock);
16042f172c55SRobert Thurlow 	dnlc_enter(dvp, nm, nvp);
16052f172c55SRobert Thurlow 
16062f172c55SRobert Thurlow 	if (*vpp != NULL)
16072f172c55SRobert Thurlow 		VN_RELE(*vpp);	/* no longer need this vnode */
16082f172c55SRobert Thurlow 
16092f172c55SRobert Thurlow 	*vpp = nvp;
16102f172c55SRobert Thurlow 
16112f172c55SRobert Thurlow 	return (0);
16122f172c55SRobert Thurlow }
16132f172c55SRobert Thurlow 
16142f172c55SRobert Thurlow /*
16152f172c55SRobert Thurlow  * Fetch the location information and resolve the new server.
16162f172c55SRobert Thurlow  * Caller needs to free up the XDR data which is returned.
16172f172c55SRobert Thurlow  * Input: mount info, shared filehandle, nodename
16182f172c55SRobert Thurlow  * Return: Index to the result or Error(-1)
16192f172c55SRobert Thurlow  * Output: FsLocations Info, Resolved Server Info.
16202f172c55SRobert Thurlow  */
16212f172c55SRobert Thurlow int
16222f172c55SRobert Thurlow nfs4_process_referral(mntinfo4_t *mi, nfs4_sharedfh_t *sfh,
16232f172c55SRobert Thurlow     char *nm, cred_t *cr, nfs4_ga_res_t *grp, COMPOUND4res_clnt *res,
16242f172c55SRobert Thurlow     struct nfs_fsl_info *fsloc)
16252f172c55SRobert Thurlow {
16262f172c55SRobert Thurlow 	fs_location4 *fsp;
16272f172c55SRobert Thurlow 	struct nfs_fsl_info nfsfsloc;
16282f172c55SRobert Thurlow 	int ret, i, error;
16292f172c55SRobert Thurlow 	nfs4_ga_res_t garp;
16302f172c55SRobert Thurlow 	COMPOUND4res_clnt callres;
16312f172c55SRobert Thurlow 	struct knetconfig *knc;
16322f172c55SRobert Thurlow 
16332f172c55SRobert Thurlow 	ret = nfs4_fetch_locations(mi, sfh, nm, cr, &garp, &callres, TRUE);
16342f172c55SRobert Thurlow 	if (ret == 0)
16352f172c55SRobert Thurlow 		return (-1);
16362f172c55SRobert Thurlow 
16372f172c55SRobert Thurlow 	/*
16382f172c55SRobert Thurlow 	 * As a lame attempt to figuring out if we're
16392f172c55SRobert Thurlow 	 * handling a migration event or a referral,
16402f172c55SRobert Thurlow 	 * look for rnodes with this fsid in the rnode
16412f172c55SRobert Thurlow 	 * cache.
16422f172c55SRobert Thurlow 	 *
16432f172c55SRobert Thurlow 	 * If we can find one or more such rnodes, it
16442f172c55SRobert Thurlow 	 * means we're handling a migration event and
16452f172c55SRobert Thurlow 	 * we want to bail out in that case.
16462f172c55SRobert Thurlow 	 */
16472f172c55SRobert Thurlow 	if (r4find_by_fsid(mi, &garp.n4g_fsid)) {
16482f172c55SRobert Thurlow 		DTRACE_PROBE3(nfs4clnt__debug__referral__migration,
16492f172c55SRobert Thurlow 		    mntinfo4_t *, mi, nfs4_ga_res_t *, &garp,
16502f172c55SRobert Thurlow 		    char *, "nfs4_process_referral");
1651*a17ce845SMarcel Telka 		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&callres);
16522f172c55SRobert Thurlow 		return (-1);
16532f172c55SRobert Thurlow 	}
16542f172c55SRobert Thurlow 
16552f172c55SRobert Thurlow 	/*
16562f172c55SRobert Thurlow 	 * Find the first responsive server to mount.  When we find
16572f172c55SRobert Thurlow 	 * one, fsp will point to it.
16582f172c55SRobert Thurlow 	 */
16592f172c55SRobert Thurlow 	for (i = 0; i < garp.n4g_ext_res->n4g_fslocations.locations_len; i++) {
16602f172c55SRobert Thurlow 
16612f172c55SRobert Thurlow 		fsp = &garp.n4g_ext_res->n4g_fslocations.locations_val[i];
16622f172c55SRobert Thurlow 		if (fsp->server_len == 0 || fsp->server_val == NULL)
16632f172c55SRobert Thurlow 			continue;
16642f172c55SRobert Thurlow 
16652f172c55SRobert Thurlow 		error = nfs4_callmapid(fsp->server_val, &nfsfsloc);
16662f172c55SRobert Thurlow 		if (error != 0)
16672f172c55SRobert Thurlow 			continue;
16682f172c55SRobert Thurlow 
16692f172c55SRobert Thurlow 		error = nfs4_ping_server_common(nfsfsloc.knconf,
16702f172c55SRobert Thurlow 		    nfsfsloc.addr, !(mi->mi_flags & MI4_INT));
16712f172c55SRobert Thurlow 		if (error == RPC_SUCCESS)
16722f172c55SRobert Thurlow 			break;
16732f172c55SRobert Thurlow 
16742f172c55SRobert Thurlow 		DTRACE_PROBE2(nfs4clnt__debug__referral__srvaddr,
16752f172c55SRobert Thurlow 		    sockaddr_in *, (struct sockaddr_in *)nfsfsloc.addr->buf,
16762f172c55SRobert Thurlow 		    char *, "nfs4_process_referral");
16772f172c55SRobert Thurlow 
1678*a17ce845SMarcel Telka 		xdr_free(xdr_nfs_fsl_info, (char *)&nfsfsloc);
16792f172c55SRobert Thurlow 	}
16802f172c55SRobert Thurlow 	knc = nfsfsloc.knconf;
16812f172c55SRobert Thurlow 	if ((i >= garp.n4g_ext_res->n4g_fslocations.locations_len) ||
16822f172c55SRobert Thurlow 	    (knc->knc_protofmly == NULL) || (knc->knc_proto == NULL)) {
16832f172c55SRobert Thurlow 		DTRACE_PROBE2(nfs4clnt__debug__referral__nofsloc,
16842f172c55SRobert Thurlow 		    nfs4_ga_res_t *, &garp, char *, "nfs4_process_referral");
1685*a17ce845SMarcel Telka 		xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&callres);
16862f172c55SRobert Thurlow 		return (-1);
16872f172c55SRobert Thurlow 	}
16882f172c55SRobert Thurlow 
16892f172c55SRobert Thurlow 	/* Send the results back */
16902f172c55SRobert Thurlow 	*fsloc = nfsfsloc;
16912f172c55SRobert Thurlow 	*grp = garp;
16922f172c55SRobert Thurlow 	*res = callres;
16932f172c55SRobert Thurlow 	return (i);
16942f172c55SRobert Thurlow }
16952f172c55SRobert Thurlow 
16962f172c55SRobert Thurlow /*
16972f172c55SRobert Thurlow  * Referrals case - need to fetch referral data and then upcall to
16982f172c55SRobert Thurlow  * user-level to get complete mount data.
16992f172c55SRobert Thurlow  */
17002f172c55SRobert Thurlow static ephemeral_servinfo_t *
17012f172c55SRobert Thurlow nfs4_trigger_esi_create_referral(vnode_t *vp, cred_t *cr)
17022f172c55SRobert Thurlow {
17032f172c55SRobert Thurlow 	struct knetconfig	*sikncp, *svkncp;
17042f172c55SRobert Thurlow 	struct netbuf		*bufp;
17052f172c55SRobert Thurlow 	ephemeral_servinfo_t	*esi;
17062f172c55SRobert Thurlow 	vnode_t			*dvp;
17072f172c55SRobert Thurlow 	rnode4_t		*drp;
17082f172c55SRobert Thurlow 	fs_location4		*fsp;
17092f172c55SRobert Thurlow 	struct nfs_fsl_info	nfsfsloc;
17102f172c55SRobert Thurlow 	nfs4_ga_res_t		garp;
17112f172c55SRobert Thurlow 	char			*p;
17122f172c55SRobert Thurlow 	char			fn[MAXNAMELEN];
17132f172c55SRobert Thurlow 	int			i, index = -1;
17142f172c55SRobert Thurlow 	mntinfo4_t		*mi;
17152f172c55SRobert Thurlow 	COMPOUND4res_clnt	callres;
17162f172c55SRobert Thurlow 
17172f172c55SRobert Thurlow 	/*
17182f172c55SRobert Thurlow 	 * If we're passed in a stub vnode that
17192f172c55SRobert Thurlow 	 * isn't a "referral" stub, bail out
17202f172c55SRobert Thurlow 	 * and return a failure
17212f172c55SRobert Thurlow 	 */
17222f172c55SRobert Thurlow 	if (!RP_ISSTUB_REFERRAL(VTOR4(vp)))
17232f172c55SRobert Thurlow 		return (NULL);
17242f172c55SRobert Thurlow 
17252f172c55SRobert Thurlow 	if (vtodv(vp, &dvp, CRED(), TRUE) != 0)
17262f172c55SRobert Thurlow 		return (NULL);
17272f172c55SRobert Thurlow 
17282f172c55SRobert Thurlow 	drp = VTOR4(dvp);
17292f172c55SRobert Thurlow 	if (nfs_rw_enter_sig(&drp->r_rwlock, RW_READER, INTR4(dvp))) {
17302f172c55SRobert Thurlow 		VN_RELE(dvp);
17312f172c55SRobert Thurlow 		return (NULL);
17322f172c55SRobert Thurlow 	}
17332f172c55SRobert Thurlow 
17342f172c55SRobert Thurlow 	if (vtoname(vp, fn, MAXNAMELEN) != 0) {
17352f172c55SRobert Thurlow 		nfs_rw_exit(&drp->r_rwlock);
17362f172c55SRobert Thurlow 		VN_RELE(dvp);
17372f172c55SRobert Thurlow 		return (NULL);
17382f172c55SRobert Thurlow 	}
17392f172c55SRobert Thurlow 
17402f172c55SRobert Thurlow 	mi = VTOMI4(dvp);
17412f172c55SRobert Thurlow 	index = nfs4_process_referral(mi, drp->r_fh, fn, cr,
17422f172c55SRobert Thurlow 	    &garp, &callres, &nfsfsloc);
17432f172c55SRobert Thurlow 	nfs_rw_exit(&drp->r_rwlock);
17442f172c55SRobert Thurlow 	VN_RELE(dvp);
17452f172c55SRobert Thurlow 	if (index < 0)
17462f172c55SRobert Thurlow 		return (NULL);
17472f172c55SRobert Thurlow 
17482f172c55SRobert Thurlow 	fsp = &garp.n4g_ext_res->n4g_fslocations.locations_val[index];
17492f172c55SRobert Thurlow 	esi = kmem_zalloc(sizeof (ephemeral_servinfo_t), KM_SLEEP);
17502f172c55SRobert Thurlow 
17512f172c55SRobert Thurlow 	/* initially set to be our type of ephemeral mount; may be added to */
17522f172c55SRobert Thurlow 	esi->esi_mount_flags = NFSMNT_REFERRAL;
17532f172c55SRobert Thurlow 
17542f172c55SRobert Thurlow 	esi->esi_hostname =
17552f172c55SRobert Thurlow 	    kmem_zalloc(fsp->server_val->utf8string_len + 1, KM_SLEEP);
17562f172c55SRobert Thurlow 	bcopy(fsp->server_val->utf8string_val, esi->esi_hostname,
17572f172c55SRobert Thurlow 	    fsp->server_val->utf8string_len);
17582f172c55SRobert Thurlow 	esi->esi_hostname[fsp->server_val->utf8string_len] = '\0';
17592f172c55SRobert Thurlow 
17602f172c55SRobert Thurlow 	bufp = kmem_alloc(sizeof (struct netbuf), KM_SLEEP);
17612f172c55SRobert Thurlow 	bufp->len = nfsfsloc.addr->len;
17622f172c55SRobert Thurlow 	bufp->maxlen = nfsfsloc.addr->maxlen;
17632f172c55SRobert Thurlow 	bufp->buf = kmem_zalloc(bufp->len, KM_SLEEP);
17642f172c55SRobert Thurlow 	bcopy(nfsfsloc.addr->buf, bufp->buf, bufp->len);
17652f172c55SRobert Thurlow 	esi->esi_addr = bufp;
17662f172c55SRobert Thurlow 
17672f172c55SRobert Thurlow 	esi->esi_knconf = kmem_zalloc(sizeof (*esi->esi_knconf), KM_SLEEP);
17682f172c55SRobert Thurlow 	sikncp = esi->esi_knconf;
17692f172c55SRobert Thurlow 
17702f172c55SRobert Thurlow 	DTRACE_PROBE2(nfs4clnt__debug__referral__nfsfsloc,
17712f172c55SRobert Thurlow 	    struct nfs_fsl_info *, &nfsfsloc,
17722f172c55SRobert Thurlow 	    char *, "nfs4_trigger_esi_create_referral");
17732f172c55SRobert Thurlow 
17742f172c55SRobert Thurlow 	svkncp = nfsfsloc.knconf;
17752f172c55SRobert Thurlow 	sikncp->knc_semantics = svkncp->knc_semantics;
17762f172c55SRobert Thurlow 	sikncp->knc_protofmly = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
17772f172c55SRobert Thurlow 	(void) strlcat((char *)sikncp->knc_protofmly,
17782f172c55SRobert Thurlow 	    (char *)svkncp->knc_protofmly, KNC_STRSIZE);
17792f172c55SRobert Thurlow 	sikncp->knc_proto = (caddr_t)kmem_zalloc(KNC_STRSIZE, KM_SLEEP);
17802f172c55SRobert Thurlow 	(void) strlcat((char *)sikncp->knc_proto, (char *)svkncp->knc_proto,
17812f172c55SRobert Thurlow 	    KNC_STRSIZE);
17822f172c55SRobert Thurlow 	sikncp->knc_rdev = svkncp->knc_rdev;
17832f172c55SRobert Thurlow 
17842f172c55SRobert Thurlow 	DTRACE_PROBE2(nfs4clnt__debug__referral__knetconf,
17852f172c55SRobert Thurlow 	    struct knetconfig *, sikncp,
17862f172c55SRobert Thurlow 	    char *, "nfs4_trigger_esi_create_referral");
17872f172c55SRobert Thurlow 
17882f172c55SRobert Thurlow 	esi->esi_netname = kmem_zalloc(nfsfsloc.netnm_len, KM_SLEEP);
17892f172c55SRobert Thurlow 	bcopy(nfsfsloc.netname, esi->esi_netname, nfsfsloc.netnm_len);
17902f172c55SRobert Thurlow 	esi->esi_syncaddr = NULL;
17912f172c55SRobert Thurlow 
17922f172c55SRobert Thurlow 	esi->esi_path = p = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
17932f172c55SRobert Thurlow 	esi->esi_path_len = MAXPATHLEN;
17942f172c55SRobert Thurlow 	*p++ = '/';
17952f172c55SRobert Thurlow 	for (i = 0; i < fsp->rootpath.pathname4_len; i++) {
17962f172c55SRobert Thurlow 		component4 *comp;
17972f172c55SRobert Thurlow 
17982f172c55SRobert Thurlow 		comp = &fsp->rootpath.pathname4_val[i];
17992f172c55SRobert Thurlow 		/* If no space, null the string and bail */
18002f172c55SRobert Thurlow 		if ((p - esi->esi_path) + comp->utf8string_len + 1 > MAXPATHLEN)
18012f172c55SRobert Thurlow 			goto err;
18022f172c55SRobert Thurlow 		bcopy(comp->utf8string_val, p, comp->utf8string_len);
18032f172c55SRobert Thurlow 		p += comp->utf8string_len;
18042f172c55SRobert Thurlow 		*p++ = '/';
18052f172c55SRobert Thurlow 	}
18062f172c55SRobert Thurlow 	if (fsp->rootpath.pathname4_len != 0)
18072f172c55SRobert Thurlow 		*(p - 1) = '\0';
18082f172c55SRobert Thurlow 	else
18092f172c55SRobert Thurlow 		*p = '\0';
18102f172c55SRobert Thurlow 	p = esi->esi_path;
18112f172c55SRobert Thurlow 	esi->esi_path = strdup(p);
18122f172c55SRobert Thurlow 	esi->esi_path_len = strlen(p) + 1;
18132f172c55SRobert Thurlow 	kmem_free(p, MAXPATHLEN);
18142f172c55SRobert Thurlow 
18152f172c55SRobert Thurlow 	/* Allocated in nfs4_process_referral() */
1816*a17ce845SMarcel Telka 	xdr_free(xdr_nfs_fsl_info, (char *)&nfsfsloc);
1817*a17ce845SMarcel Telka 	xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&callres);
18182f172c55SRobert Thurlow 
18192f172c55SRobert Thurlow 	return (esi);
18202f172c55SRobert Thurlow err:
18212f172c55SRobert Thurlow 	kmem_free(esi->esi_path, esi->esi_path_len);
18222f172c55SRobert Thurlow 	kmem_free(esi->esi_hostname, fsp->server_val->utf8string_len + 1);
18232f172c55SRobert Thurlow 	kmem_free(esi->esi_addr->buf, esi->esi_addr->len);
18242f172c55SRobert Thurlow 	kmem_free(esi->esi_addr, sizeof (struct netbuf));
18252f172c55SRobert Thurlow 	kmem_free(esi->esi_knconf->knc_protofmly, KNC_STRSIZE);
18262f172c55SRobert Thurlow 	kmem_free(esi->esi_knconf->knc_proto, KNC_STRSIZE);
18272f172c55SRobert Thurlow 	kmem_free(esi->esi_knconf, sizeof (*esi->esi_knconf));
18282f172c55SRobert Thurlow 	kmem_free(esi->esi_netname, nfsfsloc.netnm_len);
18292f172c55SRobert Thurlow 	kmem_free(esi, sizeof (ephemeral_servinfo_t));
1830*a17ce845SMarcel Telka 	xdr_free(xdr_nfs_fsl_info, (char *)&nfsfsloc);
1831*a17ce845SMarcel Telka 	xdr_free(xdr_COMPOUND4res_clnt, (caddr_t)&callres);
18322f172c55SRobert Thurlow 	return (NULL);
18332f172c55SRobert Thurlow }
18342f172c55SRobert Thurlow 
1835b9238976Sth /*
1836b9238976Sth  * Assemble the args, and call the generic VFS mount function to
1837b9238976Sth  * finally perform the ephemeral mount.
1838b9238976Sth  */
1839b9238976Sth static int
1840b9238976Sth nfs4_trigger_domount(vnode_t *stubvp, domount_args_t *dma, vfs_t **vfsp,
18416962f5b8SThomas Haynes     cred_t *cr, vnode_t **newvpp)
1842b9238976Sth {
1843b9238976Sth 	struct mounta	*uap;
1844b9238976Sth 	char		*mntpt, *orig_path, *path;
1845b9238976Sth 	const char	*orig_mntpt;
1846b9238976Sth 	int		retval;
1847b9238976Sth 	int		mntpt_len;
1848b9238976Sth 	int		spec_len;
1849b9238976Sth 	zone_t		*zone = curproc->p_zone;
1850b9238976Sth 	bool_t		has_leading_slash;
18516962f5b8SThomas Haynes 	int		i;
1852b9238976Sth 
1853b9238976Sth 	vfs_t			*stubvfsp = stubvp->v_vfsp;
1854b9238976Sth 	ephemeral_servinfo_t	*esi = dma->dma_esi;
1855b9238976Sth 	struct nfs_args		*nargs = dma->dma_nargs;
1856b9238976Sth 
1857b9238976Sth 	/* first, construct the mount point for the ephemeral mount */
1858b9238976Sth 	orig_path = path = fn_path(VTOSV(stubvp)->sv_name);
1859b9238976Sth 	orig_mntpt = (char *)refstr_value(stubvfsp->vfs_mntpt);
1860b9238976Sth 
1861b9238976Sth 	if (*orig_path == '.')
1862b9238976Sth 		orig_path++;
1863b9238976Sth 
1864b9238976Sth 	/*
1865b9238976Sth 	 * Get rid of zone's root path
1866b9238976Sth 	 */
1867b9238976Sth 	if (zone != global_zone) {
1868b9238976Sth 		/*
1869b9238976Sth 		 * -1 for trailing '/' and -1 for EOS.
1870b9238976Sth 		 */
1871b9238976Sth 		if (strncmp(zone->zone_rootpath, orig_mntpt,
1872b9238976Sth 		    zone->zone_rootpathlen - 1) == 0) {
1873b9238976Sth 			orig_mntpt += (zone->zone_rootpathlen - 2);
1874b9238976Sth 		}
1875b9238976Sth 	}
1876b9238976Sth 
1877b9238976Sth 	mntpt_len = strlen(orig_mntpt) + strlen(orig_path);
1878b9238976Sth 	mntpt = kmem_zalloc(mntpt_len + 1, KM_SLEEP);
1879b9238976Sth 	(void) strcat(mntpt, orig_mntpt);
1880b9238976Sth 	(void) strcat(mntpt, orig_path);
1881b9238976Sth 
1882b9238976Sth 	kmem_free(path, strlen(path) + 1);
1883b9238976Sth 	path = esi->esi_path;
1884b9238976Sth 	if (*path == '.')
1885b9238976Sth 		path++;
1886b9238976Sth 	if (path[0] == '/' && path[1] == '/')
1887b9238976Sth 		path++;
1888b9238976Sth 	has_leading_slash = (*path == '/');
1889b9238976Sth 
1890b9238976Sth 	spec_len = strlen(dma->dma_hostlist);
1891b9238976Sth 	spec_len += strlen(path);
1892b9238976Sth 
1893b9238976Sth 	/* We are going to have to add this in */
1894b9238976Sth 	if (!has_leading_slash)
1895b9238976Sth 		spec_len++;
1896b9238976Sth 
1897b9238976Sth 	/* We need to get the ':' for dma_hostlist:esi_path */
1898b9238976Sth 	spec_len++;
1899b9238976Sth 
1900b9238976Sth 	uap = kmem_zalloc(sizeof (struct mounta), KM_SLEEP);
1901b9238976Sth 	uap->spec = kmem_zalloc(spec_len + 1, KM_SLEEP);
1902b9238976Sth 	(void) snprintf(uap->spec, spec_len + 1, "%s:%s%s", dma->dma_hostlist,
1903b9238976Sth 	    has_leading_slash ? "" : "/", path);
1904b9238976Sth 
1905b9238976Sth 	uap->dir = mntpt;
1906b9238976Sth 
1907b9238976Sth 	uap->flags = MS_SYSSPACE | MS_DATA;
1908b9238976Sth 	/* fstype-independent mount options not covered elsewhere */
1909b9238976Sth 	/* copy parent's mount(1M) "-m" flag */
1910b9238976Sth 	if (stubvfsp->vfs_flag & VFS_NOMNTTAB)
1911b9238976Sth 		uap->flags |= MS_NOMNTTAB;
1912b9238976Sth 
1913b9238976Sth 	uap->fstype = MNTTYPE_NFS4;
1914b9238976Sth 	uap->dataptr = (char *)nargs;
1915b9238976Sth 	/* not needed for MS_SYSSPACE */
1916b9238976Sth 	uap->datalen = 0;
1917b9238976Sth 
1918b9238976Sth 	/* use optptr to pass in extra mount options */
1919b9238976Sth 	uap->flags |= MS_OPTIONSTR;
1920b9238976Sth 	uap->optptr = nfs4_trigger_create_mntopts(stubvfsp);
1921b9238976Sth 	if (uap->optptr == NULL) {
1922b9238976Sth 		retval = EINVAL;
1923b9238976Sth 		goto done;
1924b9238976Sth 	}
1925546a3997SThomas Haynes 
1926b9238976Sth 	/* domount() expects us to count the trailing NUL */
1927b9238976Sth 	uap->optlen = strlen(uap->optptr) + 1;
1928b9238976Sth 
19296962f5b8SThomas Haynes 	/*
19306962f5b8SThomas Haynes 	 * If we get EBUSY, we try again once to see if we can perform
19316962f5b8SThomas Haynes 	 * the mount. We do this because of a spurious race condition.
19326962f5b8SThomas Haynes 	 */
19336962f5b8SThomas Haynes 	for (i = 0; i < 2; i++) {
19346962f5b8SThomas Haynes 		int	error;
19356962f5b8SThomas Haynes 		bool_t	was_mounted;
19366962f5b8SThomas Haynes 
19376962f5b8SThomas Haynes 		retval = domount(NULL, uap, stubvp, cr, vfsp);
19386962f5b8SThomas Haynes 		if (retval == 0) {
19396962f5b8SThomas Haynes 			retval = VFS_ROOT(*vfsp, newvpp);
19406962f5b8SThomas Haynes 			VFS_RELE(*vfsp);
19416962f5b8SThomas Haynes 			break;
19426962f5b8SThomas Haynes 		} else if (retval != EBUSY) {
19436962f5b8SThomas Haynes 			break;
19446962f5b8SThomas Haynes 		}
19456962f5b8SThomas Haynes 
19466962f5b8SThomas Haynes 		/*
19476962f5b8SThomas Haynes 		 * We might find it mounted by the other racer...
19486962f5b8SThomas Haynes 		 */
19496962f5b8SThomas Haynes 		error = nfs4_trigger_mounted_already(stubvp,
19506962f5b8SThomas Haynes 		    newvpp, &was_mounted, vfsp);
19516962f5b8SThomas Haynes 		if (error) {
19526962f5b8SThomas Haynes 			goto done;
19536962f5b8SThomas Haynes 		} else if (was_mounted) {
19546962f5b8SThomas Haynes 			retval = 0;
19556962f5b8SThomas Haynes 			break;
19566962f5b8SThomas Haynes 		}
19576962f5b8SThomas Haynes 	}
1958546a3997SThomas Haynes 
1959b9238976Sth done:
1960b9238976Sth 	if (uap->optptr)
1961b9238976Sth 		nfs4_trigger_destroy_mntopts(uap->optptr);
1962b9238976Sth 
1963b9238976Sth 	kmem_free(uap->spec, spec_len + 1);
1964b9238976Sth 	kmem_free(uap, sizeof (struct mounta));
1965b9238976Sth 	kmem_free(mntpt, mntpt_len + 1);
1966b9238976Sth 
1967b9238976Sth 	return (retval);
1968b9238976Sth }
1969b9238976Sth 
1970b9238976Sth /*
1971b9238976Sth  * Build an nfs_args structure for passing to domount().
1972b9238976Sth  *
1973b9238976Sth  * Ephemeral mount-type specific data comes from the ephemeral_servinfo_t;
1974b9238976Sth  * generic data - common to all ephemeral mount types - is read directly
1975b9238976Sth  * from the parent mount's servinfo4_t and mntinfo4_t, via the stub vnode.
1976b9238976Sth  */
1977b9238976Sth static struct nfs_args *
1978b9238976Sth nfs4_trigger_nargs_create(mntinfo4_t *mi, servinfo4_t *svp,
1979b9238976Sth     ephemeral_servinfo_t *esi)
1980b9238976Sth {
1981b9238976Sth 	sec_data_t *secdata;
1982b9238976Sth 	struct nfs_args *nargs;
1983b9238976Sth 
1984b9238976Sth 	/* setup the nfs args */
1985b9238976Sth 	nargs = kmem_zalloc(sizeof (struct nfs_args), KM_SLEEP);
1986b9238976Sth 
1987b9238976Sth 	(void) nfs_rw_enter_sig(&svp->sv_lock, RW_READER, 0);
1988b9238976Sth 
1989b9238976Sth 	nargs->addr = esi->esi_addr;
1990b9238976Sth 
1991b9238976Sth 	/* for AUTH_DH by negotiation */
1992b9238976Sth 	if (esi->esi_syncaddr || esi->esi_netname) {
1993b9238976Sth 		nargs->flags |= NFSMNT_SECURE;
1994b9238976Sth 		nargs->syncaddr = esi->esi_syncaddr;
1995b9238976Sth 		nargs->netname = esi->esi_netname;
1996b9238976Sth 	}
1997b9238976Sth 
1998b9238976Sth 	nargs->flags |= NFSMNT_KNCONF;
1999b9238976Sth 	nargs->knconf = esi->esi_knconf;
2000b9238976Sth 	nargs->flags |= NFSMNT_HOSTNAME;
2001b9238976Sth 	nargs->hostname = esi->esi_hostname;
2002b9238976Sth 	nargs->fh = esi->esi_path;
2003b9238976Sth 
2004b9238976Sth 	/* general mount settings, all copied from parent mount */
2005b9238976Sth 	mutex_enter(&mi->mi_lock);
2006b9238976Sth 
2007b9238976Sth 	if (!(mi->mi_flags & MI4_HARD))
2008b9238976Sth 		nargs->flags |= NFSMNT_SOFT;
2009b9238976Sth 
2010b9238976Sth 	nargs->flags |= NFSMNT_WSIZE | NFSMNT_RSIZE | NFSMNT_TIMEO |
2011b9238976Sth 	    NFSMNT_RETRANS;
2012b9238976Sth 	nargs->wsize = mi->mi_stsize;
2013b9238976Sth 	nargs->rsize = mi->mi_tsize;
2014b9238976Sth 	nargs->timeo = mi->mi_timeo;
2015b9238976Sth 	nargs->retrans = mi->mi_retrans;
2016b9238976Sth 
2017b9238976Sth 	if (mi->mi_flags & MI4_INT)
2018b9238976Sth 		nargs->flags |= NFSMNT_INT;
2019b9238976Sth 	if (mi->mi_flags & MI4_NOAC)
2020b9238976Sth 		nargs->flags |= NFSMNT_NOAC;
2021b9238976Sth 
2022b9238976Sth 	nargs->flags |= NFSMNT_ACREGMIN | NFSMNT_ACREGMAX | NFSMNT_ACDIRMIN |
2023b9238976Sth 	    NFSMNT_ACDIRMAX;
2024b9238976Sth 	nargs->acregmin = HR2SEC(mi->mi_acregmin);
2025b9238976Sth 	nargs->acregmax = HR2SEC(mi->mi_acregmax);
2026b9238976Sth 	nargs->acdirmin = HR2SEC(mi->mi_acdirmin);
2027b9238976Sth 	nargs->acdirmax = HR2SEC(mi->mi_acdirmax);
2028b9238976Sth 
20292f172c55SRobert Thurlow 	/* add any specific flags for this type of ephemeral mount */
20302f172c55SRobert Thurlow 	nargs->flags |= esi->esi_mount_flags;
20312f172c55SRobert Thurlow 
2032b9238976Sth 	if (mi->mi_flags & MI4_NOCTO)
2033b9238976Sth 		nargs->flags |= NFSMNT_NOCTO;
2034b9238976Sth 	if (mi->mi_flags & MI4_GRPID)
2035b9238976Sth 		nargs->flags |= NFSMNT_GRPID;
2036b9238976Sth 	if (mi->mi_flags & MI4_LLOCK)
2037b9238976Sth 		nargs->flags |= NFSMNT_LLOCK;
2038b9238976Sth 	if (mi->mi_flags & MI4_NOPRINT)
2039b9238976Sth 		nargs->flags |= NFSMNT_NOPRINT;
2040b9238976Sth 	if (mi->mi_flags & MI4_DIRECTIO)
2041b9238976Sth 		nargs->flags |= NFSMNT_DIRECTIO;
20422f172c55SRobert Thurlow 	if (mi->mi_flags & MI4_PUBLIC && nargs->flags & NFSMNT_MIRRORMOUNT)
2043b9238976Sth 		nargs->flags |= NFSMNT_PUBLIC;
2044b9238976Sth 
20452f172c55SRobert Thurlow 	/* Do some referral-specific option tweaking */
20462f172c55SRobert Thurlow 	if (nargs->flags & NFSMNT_REFERRAL) {
20472f172c55SRobert Thurlow 		nargs->flags &= ~NFSMNT_DORDMA;
20482f172c55SRobert Thurlow 		nargs->flags |= NFSMNT_TRYRDMA;
20492f172c55SRobert Thurlow 	}
2050b9238976Sth 
20512f172c55SRobert Thurlow 	mutex_exit(&mi->mi_lock);
2052b9238976Sth 
2053b9238976Sth 	/*
2054b9238976Sth 	 * Security data & negotiation policy.
2055b9238976Sth 	 *
20562f172c55SRobert Thurlow 	 * For mirror mounts, we need to preserve the parent mount's
20572f172c55SRobert Thurlow 	 * preference for security negotiation, translating SV4_TRYSECDEFAULT
20582f172c55SRobert Thurlow 	 * to NFSMNT_SECDEFAULT if present.
20592f172c55SRobert Thurlow 	 *
20602f172c55SRobert Thurlow 	 * For referrals, we always want security negotiation and will
20612f172c55SRobert Thurlow 	 * set NFSMNT_SECDEFAULT and we will not copy current secdata.
20622f172c55SRobert Thurlow 	 * The reason is that we can't negotiate down from a parent's
20632f172c55SRobert Thurlow 	 * Kerberos flavor to AUTH_SYS.
2064b9238976Sth 	 *
2065b9238976Sth 	 * If SV4_TRYSECDEFAULT is not set, that indicates that a specific
2066b9238976Sth 	 * security flavour was requested, with data in sv_secdata, and that
2067b9238976Sth 	 * no negotiation should occur. If this specified flavour fails, that's
2068b9238976Sth 	 * it. We will copy sv_secdata, and not set NFSMNT_SECDEFAULT.
2069b9238976Sth 	 *
2070b9238976Sth 	 * If SV4_TRYSECDEFAULT is set, then we start with a passed-in
2071b9238976Sth 	 * default flavour, in sv_secdata, but then negotiate a new flavour.
2072b9238976Sth 	 * Possible flavours are recorded in an array in sv_secinfo, with
2073b9238976Sth 	 * currently in-use flavour pointed to by sv_currsec.
2074b9238976Sth 	 *
2075b9238976Sth 	 * If sv_currsec is set, i.e. if negotiation has already occurred,
2076b9238976Sth 	 * we will copy sv_currsec. Otherwise, copy sv_secdata. Regardless,
2077b9238976Sth 	 * we will set NFSMNT_SECDEFAULT, to enable negotiation.
2078b9238976Sth 	 */
20792f172c55SRobert Thurlow 	if (nargs->flags & NFSMNT_REFERRAL) {
20802f172c55SRobert Thurlow 		/* enable negotiation for referral mount */
20812f172c55SRobert Thurlow 		nargs->flags |= NFSMNT_SECDEFAULT;
20822f172c55SRobert Thurlow 		secdata = kmem_alloc(sizeof (sec_data_t), KM_SLEEP);
20832f172c55SRobert Thurlow 		secdata->secmod = secdata->rpcflavor = AUTH_SYS;
20842f172c55SRobert Thurlow 		secdata->data = NULL;
2085b87f76edSThomas Haynes 	} else if (svp->sv_flags & SV4_TRYSECDEFAULT) {
20862f172c55SRobert Thurlow 		/* enable negotiation for mirror mount */
2087b9238976Sth 		nargs->flags |= NFSMNT_SECDEFAULT;
2088b9238976Sth 
2089b9238976Sth 		/*
2090b9238976Sth 		 * As a starting point for negotiation, copy parent
2091b9238976Sth 		 * mount's negotiated flavour (sv_currsec) if available,
2092b9238976Sth 		 * or its passed-in flavour (sv_secdata) if not.
2093b9238976Sth 		 */
2094b9238976Sth 		if (svp->sv_currsec != NULL)
2095b9238976Sth 			secdata = copy_sec_data(svp->sv_currsec);
2096b9238976Sth 		else if (svp->sv_secdata != NULL)
2097b9238976Sth 			secdata = copy_sec_data(svp->sv_secdata);
2098b9238976Sth 		else
2099b9238976Sth 			secdata = NULL;
2100b9238976Sth 	} else {
2101b9238976Sth 		/* do not enable negotiation; copy parent's passed-in flavour */
2102b9238976Sth 		if (svp->sv_secdata != NULL)
2103b9238976Sth 			secdata = copy_sec_data(svp->sv_secdata);
2104b9238976Sth 		else
2105b9238976Sth 			secdata = NULL;
2106b9238976Sth 	}
2107b9238976Sth 
2108b9238976Sth 	nfs_rw_exit(&svp->sv_lock);
2109b9238976Sth 
2110b9238976Sth 	nargs->flags |= NFSMNT_NEWARGS;
2111b9238976Sth 	nargs->nfs_args_ext = NFS_ARGS_EXTB;
2112b9238976Sth 	nargs->nfs_ext_u.nfs_extB.secdata = secdata;
2113b9238976Sth 
2114b9238976Sth 	/* for NFS RO failover; caller will set if necessary */
2115b9238976Sth 	nargs->nfs_ext_u.nfs_extB.next = NULL;
2116b9238976Sth 
2117b9238976Sth 	return (nargs);
2118b9238976Sth }
2119b9238976Sth 
2120b9238976Sth static void
2121b9238976Sth nfs4_trigger_nargs_destroy(struct nfs_args *nargs)
2122b9238976Sth {
2123b9238976Sth 	/*
2124b9238976Sth 	 * Either the mount failed, in which case the data is not needed, or
2125b9238976Sth 	 * nfs4_mount() has either taken copies of what it needs or,
2126b9238976Sth 	 * where it has merely copied the ptr, it has set *our* ptr to NULL,
2127b9238976Sth 	 * whereby nfs4_free_args() will ignore it.
2128b9238976Sth 	 */
2129b9238976Sth 	nfs4_free_args(nargs);
2130b9238976Sth 	kmem_free(nargs, sizeof (struct nfs_args));
2131b9238976Sth }
2132b9238976Sth 
2133b9238976Sth /*
2134b9238976Sth  * When we finally get into the mounting, we need to add this
2135b9238976Sth  * node to the ephemeral tree.
2136b9238976Sth  *
2137b9238976Sth  * This is called from nfs4_mount().
2138b9238976Sth  */
2139d3a14591SThomas Haynes int
2140b9238976Sth nfs4_record_ephemeral_mount(mntinfo4_t *mi, vnode_t *mvp)
2141b9238976Sth {
2142b9238976Sth 	mntinfo4_t		*mi_parent;
2143b9238976Sth 	nfs4_ephemeral_t	*eph;
2144b9238976Sth 	nfs4_ephemeral_tree_t	*net;
2145b9238976Sth 
2146b9238976Sth 	nfs4_ephemeral_t	*prior;
2147b9238976Sth 	nfs4_ephemeral_t	*child;
2148b9238976Sth 
2149b9238976Sth 	nfs4_ephemeral_t	*peer;
2150b9238976Sth 
2151b9238976Sth 	nfs4_trigger_globals_t	*ntg;
2152b9238976Sth 	zone_t			*zone = curproc->p_zone;
2153b9238976Sth 
2154d3a14591SThomas Haynes 	int			rc = 0;
2155d3a14591SThomas Haynes 
2156b9238976Sth 	mi_parent = VTOMI4(mvp);
2157b9238976Sth 
2158b9238976Sth 	/*
2159b9238976Sth 	 * Get this before grabbing anything else!
2160b9238976Sth 	 */
2161b9238976Sth 	ntg = zone_getspecific(nfs4_ephemeral_key, zone);
2162b9238976Sth 	if (!ntg->ntg_thread_started) {
2163b9238976Sth 		nfs4_ephemeral_start_harvester(ntg);
2164b9238976Sth 	}
2165b9238976Sth 
2166b9238976Sth 	mutex_enter(&mi_parent->mi_lock);
2167b9238976Sth 	mutex_enter(&mi->mi_lock);
2168b9238976Sth 
2169d3a14591SThomas Haynes 	net = mi->mi_ephemeral_tree =
2170d3a14591SThomas Haynes 	    mi_parent->mi_ephemeral_tree;
2171d3a14591SThomas Haynes 
2172d3a14591SThomas Haynes 	/*
2173d3a14591SThomas Haynes 	 * If the mi_ephemeral_tree is NULL, then it
2174d3a14591SThomas Haynes 	 * means that either the harvester or a manual
2175d3a14591SThomas Haynes 	 * umount has cleared the tree out right before
2176d3a14591SThomas Haynes 	 * we got here.
2177d3a14591SThomas Haynes 	 *
2178d3a14591SThomas Haynes 	 * There is nothing we can do here, so return
2179d3a14591SThomas Haynes 	 * to the caller and let them decide whether they
2180d3a14591SThomas Haynes 	 * try again.
2181d3a14591SThomas Haynes 	 */
2182d3a14591SThomas Haynes 	if (net == NULL) {
2183d3a14591SThomas Haynes 		mutex_exit(&mi->mi_lock);
2184d3a14591SThomas Haynes 		mutex_exit(&mi_parent->mi_lock);
2185d3a14591SThomas Haynes 
2186d3a14591SThomas Haynes 		return (EBUSY);
2187d3a14591SThomas Haynes 	}
2188d3a14591SThomas Haynes 
21892f172c55SRobert Thurlow 	/*
21902f172c55SRobert Thurlow 	 * We've just tied the mntinfo to the tree, so
21912f172c55SRobert Thurlow 	 * now we bump the refcnt and hold it there until
21922f172c55SRobert Thurlow 	 * this mntinfo is removed from the tree.
21932f172c55SRobert Thurlow 	 */
2194d3a14591SThomas Haynes 	nfs4_ephemeral_tree_hold(net);
2195d3a14591SThomas Haynes 
2196b9238976Sth 	/*
2197b9238976Sth 	 * We need to tack together the ephemeral mount
2198b9238976Sth 	 * with this new mntinfo.
2199b9238976Sth 	 */
2200b9238976Sth 	eph = kmem_zalloc(sizeof (*eph), KM_SLEEP);
2201b9238976Sth 	eph->ne_mount = mi;
2202b87f76edSThomas Haynes 	MI4_HOLD(mi);
2203b87f76edSThomas Haynes 	VFS_HOLD(mi->mi_vfsp);
2204b9238976Sth 	eph->ne_ref_time = gethrestime_sec();
2205b9238976Sth 
2206b9238976Sth 	/*
2207b9238976Sth 	 * We need to tell the ephemeral mount when
2208b9238976Sth 	 * to time out.
2209b9238976Sth 	 */
2210b9238976Sth 	eph->ne_mount_to = ntg->ntg_mount_to;
2211b9238976Sth 
2212b9238976Sth 	mi->mi_ephemeral = eph;
2213b9238976Sth 
2214b9238976Sth 	/*
2215b9238976Sth 	 * If the enclosing mntinfo4 is also ephemeral,
2216b9238976Sth 	 * then we need to point to its enclosing parent.
2217b9238976Sth 	 * Else the enclosing mntinfo4 is the enclosing parent.
2218b9238976Sth 	 *
2219b9238976Sth 	 * We also need to weave this ephemeral node
2220b9238976Sth 	 * into the tree.
2221b9238976Sth 	 */
2222b9238976Sth 	if (mi_parent->mi_flags & MI4_EPHEMERAL) {
2223b9238976Sth 		/*
2224b9238976Sth 		 * We need to decide if we are
2225b9238976Sth 		 * the root node of this branch
2226b9238976Sth 		 * or if we are a sibling of this
2227b9238976Sth 		 * branch.
2228b9238976Sth 		 */
2229b9238976Sth 		prior = mi_parent->mi_ephemeral;
2230d3a14591SThomas Haynes 		if (prior == NULL) {
2231d3a14591SThomas Haynes 			/*
2232d3a14591SThomas Haynes 			 * Race condition, clean up, and
2233d3a14591SThomas Haynes 			 * let caller handle mntinfo.
2234d3a14591SThomas Haynes 			 */
2235d3a14591SThomas Haynes 			mi->mi_flags &= ~MI4_EPHEMERAL;
2236d3a14591SThomas Haynes 			mi->mi_ephemeral = NULL;
2237d3a14591SThomas Haynes 			kmem_free(eph, sizeof (*eph));
2238b87f76edSThomas Haynes 			VFS_RELE(mi->mi_vfsp);
2239b87f76edSThomas Haynes 			MI4_RELE(mi);
22402f172c55SRobert Thurlow 			nfs4_ephemeral_tree_rele(net);
2241d3a14591SThomas Haynes 			rc = EBUSY;
2242b9238976Sth 		} else {
2243d3a14591SThomas Haynes 			if (prior->ne_child == NULL) {
2244d3a14591SThomas Haynes 				prior->ne_child = eph;
2245d3a14591SThomas Haynes 			} else {
2246d3a14591SThomas Haynes 				child = prior->ne_child;
2247b9238976Sth 
2248d3a14591SThomas Haynes 				prior->ne_child = eph;
2249d3a14591SThomas Haynes 				eph->ne_peer = child;
2250b9238976Sth 
2251d3a14591SThomas Haynes 				child->ne_prior = eph;
2252d3a14591SThomas Haynes 			}
2253b9238976Sth 
2254d3a14591SThomas Haynes 			eph->ne_prior = prior;
2255d3a14591SThomas Haynes 		}
2256b9238976Sth 	} else {
2257b9238976Sth 		/*
2258b9238976Sth 		 * The parent mntinfo4 is the non-ephemeral
2259b9238976Sth 		 * root of the ephemeral tree. We
2260b9238976Sth 		 * need to decide if we are the root
2261b9238976Sth 		 * node of that tree or if we are a
2262b9238976Sth 		 * sibling of the root node.
2263b9238976Sth 		 *
2264b9238976Sth 		 * We are the root if there is no
2265b9238976Sth 		 * other node.
2266b9238976Sth 		 */
2267b9238976Sth 		if (net->net_root == NULL) {
2268b9238976Sth 			net->net_root = eph;
2269b9238976Sth 		} else {
2270b9238976Sth 			eph->ne_peer = peer = net->net_root;
2271b9238976Sth 			ASSERT(peer != NULL);
2272b9238976Sth 			net->net_root = eph;
2273b9238976Sth 
2274b9238976Sth 			peer->ne_prior = eph;
2275b9238976Sth 		}
2276b9238976Sth 
2277b9238976Sth 		eph->ne_prior = NULL;
2278b9238976Sth 	}
2279b9238976Sth 
2280b9238976Sth 	mutex_exit(&mi->mi_lock);
2281b9238976Sth 	mutex_exit(&mi_parent->mi_lock);
2282d3a14591SThomas Haynes 
2283d3a14591SThomas Haynes 	return (rc);
2284b9238976Sth }
2285b9238976Sth 
2286b9238976Sth /*
2287b9238976Sth  * Commit the changes to the ephemeral tree for removing this node.
2288b9238976Sth  */
2289b9238976Sth static void
2290b9238976Sth nfs4_ephemeral_umount_cleanup(nfs4_ephemeral_t *eph)
2291b9238976Sth {
2292b9238976Sth 	nfs4_ephemeral_t	*e = eph;
2293b9238976Sth 	nfs4_ephemeral_t	*peer;
2294b9238976Sth 	nfs4_ephemeral_t	*prior;
2295b9238976Sth 
2296b9238976Sth 	peer = eph->ne_peer;
2297b9238976Sth 	prior = e->ne_prior;
2298b9238976Sth 
2299b9238976Sth 	/*
2300b9238976Sth 	 * If this branch root was not the
2301b9238976Sth 	 * tree root, then we need to fix back pointers.
2302b9238976Sth 	 */
2303b9238976Sth 	if (prior) {
2304b9238976Sth 		if (prior->ne_child == e) {
2305b9238976Sth 			prior->ne_child = peer;
2306b9238976Sth 		} else {
2307b9238976Sth 			prior->ne_peer = peer;
2308b9238976Sth 		}
2309b9238976Sth 
2310b9238976Sth 		if (peer)
2311b9238976Sth 			peer->ne_prior = prior;
2312b9238976Sth 	} else if (peer) {
2313b9238976Sth 		peer->ne_mount->mi_ephemeral_tree->net_root = peer;
2314b9238976Sth 		peer->ne_prior = NULL;
2315b9238976Sth 	} else {
2316b9238976Sth 		e->ne_mount->mi_ephemeral_tree->net_root = NULL;
2317b9238976Sth 	}
2318b9238976Sth }
2319b9238976Sth 
2320b9238976Sth /*
2321b9238976Sth  * We want to avoid recursion at all costs. So we need to
2322b9238976Sth  * unroll the tree. We do this by a depth first traversal to
2323b9238976Sth  * leaf nodes. We blast away the leaf and work our way back
2324b9238976Sth  * up and down the tree.
2325b9238976Sth  */
2326b9238976Sth static int
2327b9238976Sth nfs4_ephemeral_unmount_engine(nfs4_ephemeral_t *eph,
2328b9238976Sth     int isTreeRoot, int flag, cred_t *cr)
2329b9238976Sth {
2330b9238976Sth 	nfs4_ephemeral_t	*e = eph;
2331b9238976Sth 	nfs4_ephemeral_t	*prior;
2332b9238976Sth 	mntinfo4_t		*mi;
2333b9238976Sth 	vfs_t			*vfsp;
2334b9238976Sth 	int			error;
2335b9238976Sth 
2336b9238976Sth 	/*
2337b9238976Sth 	 * We use the loop while unrolling the ephemeral tree.
2338b9238976Sth 	 */
2339b9238976Sth 	for (;;) {
2340b9238976Sth 		/*
2341b9238976Sth 		 * First we walk down the child.
2342b9238976Sth 		 */
2343b9238976Sth 		if (e->ne_child) {
2344b9238976Sth 			prior = e;
2345b9238976Sth 			e = e->ne_child;
2346b9238976Sth 			continue;
2347b9238976Sth 		}
2348b9238976Sth 
2349b9238976Sth 		/*
2350b9238976Sth 		 * If we are the root of the branch we are removing,
2351b9238976Sth 		 * we end it here. But if the branch is the root of
2352b9238976Sth 		 * the tree, we have to forge on. We do not consider
2353b9238976Sth 		 * the peer list for the root because while it may
2354b9238976Sth 		 * be okay to remove, it is both extra work and a
2355b9238976Sth 		 * potential for a false-positive error to stall the
2356b9238976Sth 		 * unmount attempt.
2357b9238976Sth 		 */
2358b9238976Sth 		if (e == eph && isTreeRoot == FALSE)
2359b9238976Sth 			return (0);
2360b9238976Sth 
2361b9238976Sth 		/*
2362b9238976Sth 		 * Next we walk down the peer list.
2363b9238976Sth 		 */
2364b9238976Sth 		if (e->ne_peer) {
2365b9238976Sth 			prior = e;
2366b9238976Sth 			e = e->ne_peer;
2367b9238976Sth 			continue;
2368b9238976Sth 		}
2369b9238976Sth 
2370b9238976Sth 		/*
2371b9238976Sth 		 * We can only remove the node passed in by the
2372b9238976Sth 		 * caller if it is the root of the ephemeral tree.
2373b9238976Sth 		 * Otherwise, the caller will remove it.
2374b9238976Sth 		 */
2375b9238976Sth 		if (e == eph && isTreeRoot == FALSE)
2376b9238976Sth 			return (0);
2377b9238976Sth 
2378b9238976Sth 		/*
2379b9238976Sth 		 * Okay, we have a leaf node, time
2380b9238976Sth 		 * to prune it!
2381b9238976Sth 		 *
2382b9238976Sth 		 * Note that prior can only be NULL if
2383b9238976Sth 		 * and only if it is the root of the
2384b9238976Sth 		 * ephemeral tree.
2385b9238976Sth 		 */
2386b9238976Sth 		prior = e->ne_prior;
2387b9238976Sth 
2388b9238976Sth 		mi = e->ne_mount;
2389b9238976Sth 		mutex_enter(&mi->mi_lock);
2390b9238976Sth 		vfsp = mi->mi_vfsp;
2391b87f76edSThomas Haynes 		ASSERT(vfsp != NULL);
2392b9238976Sth 
2393b9238976Sth 		/*
2394b9238976Sth 		 * Cleared by umount2_engine.
2395b9238976Sth 		 */
2396b9238976Sth 		VFS_HOLD(vfsp);
2397b9238976Sth 
2398b9238976Sth 		/*
2399b9238976Sth 		 * Inform nfs4_unmount to not recursively
2400b9238976Sth 		 * descend into this node's children when it
2401b9238976Sth 		 * gets processed.
2402b9238976Sth 		 */
2403b9238976Sth 		mi->mi_flags |= MI4_EPHEMERAL_RECURSED;
2404b9238976Sth 		mutex_exit(&mi->mi_lock);
2405b9238976Sth 
2406b9238976Sth 		error = umount2_engine(vfsp, flag, cr, FALSE);
2407b9238976Sth 		if (error) {
2408b9238976Sth 			/*
2409b9238976Sth 			 * We need to reenable nfs4_unmount's ability
2410b9238976Sth 			 * to recursively descend on this node.
2411b9238976Sth 			 */
2412b9238976Sth 			mutex_enter(&mi->mi_lock);
2413b9238976Sth 			mi->mi_flags &= ~MI4_EPHEMERAL_RECURSED;
2414b9238976Sth 			mutex_exit(&mi->mi_lock);
2415b9238976Sth 
2416b9238976Sth 			return (error);
2417b9238976Sth 		}
2418b9238976Sth 
2419b9238976Sth 		/*
2420b9238976Sth 		 * If we are the current node, we do not want to
2421b9238976Sth 		 * touch anything else. At this point, the only
2422b9238976Sth 		 * way the current node can have survived to here
2423b9238976Sth 		 * is if it is the root of the ephemeral tree and
2424b9238976Sth 		 * we are unmounting the enclosing mntinfo4.
2425b9238976Sth 		 */
2426b9238976Sth 		if (e == eph) {
2427b9238976Sth 			ASSERT(prior == NULL);
2428b9238976Sth 			return (0);
2429b9238976Sth 		}
2430b9238976Sth 
2431b9238976Sth 		/*
2432b9238976Sth 		 * Stitch up the prior node. Note that since
2433b9238976Sth 		 * we have handled the root of the tree, prior
2434b9238976Sth 		 * must be non-NULL.
2435b9238976Sth 		 */
2436b9238976Sth 		ASSERT(prior != NULL);
2437b9238976Sth 		if (prior->ne_child == e) {
2438b9238976Sth 			prior->ne_child = NULL;
2439b9238976Sth 		} else {
2440b9238976Sth 			ASSERT(prior->ne_peer == e);
2441b9238976Sth 
2442b9238976Sth 			prior->ne_peer = NULL;
2443b9238976Sth 		}
2444b9238976Sth 
2445b9238976Sth 		e = prior;
2446b9238976Sth 	}
2447b9238976Sth 
2448b9238976Sth 	/* NOTREACHED */
2449b9238976Sth }
2450b9238976Sth 
2451b9238976Sth /*
2452b9238976Sth  * Common code to safely release net_cnt_lock and net_tree_lock
2453b9238976Sth  */
2454b9238976Sth void
2455b9238976Sth nfs4_ephemeral_umount_unlock(bool_t *pmust_unlock,
24562f172c55SRobert Thurlow     nfs4_ephemeral_tree_t **pnet)
2457b9238976Sth {
2458b9238976Sth 	nfs4_ephemeral_tree_t	*net = *pnet;
2459b9238976Sth 
2460b9238976Sth 	if (*pmust_unlock) {
2461b9238976Sth 		mutex_enter(&net->net_cnt_lock);
2462b9238976Sth 		net->net_status &= ~NFS4_EPHEMERAL_TREE_UMOUNTING;
2463b9238976Sth 		mutex_exit(&net->net_cnt_lock);
2464b9238976Sth 
2465b9238976Sth 		mutex_exit(&net->net_tree_lock);
2466b9238976Sth 
2467b9238976Sth 		*pmust_unlock = FALSE;
2468b9238976Sth 	}
2469b9238976Sth }
2470b9238976Sth 
2471b9238976Sth /*
2472b9238976Sth  * While we may have removed any child or sibling nodes of this
2473b9238976Sth  * ephemeral node, we can not nuke it until we know that there
2474b9238976Sth  * were no actived vnodes on it. This will do that final
2475b9238976Sth  * work once we know it is not busy.
2476b9238976Sth  */
2477b9238976Sth void
2478b9238976Sth nfs4_ephemeral_umount_activate(mntinfo4_t *mi, bool_t *pmust_unlock,
24792f172c55SRobert Thurlow     nfs4_ephemeral_tree_t **pnet)
2480b9238976Sth {
2481b9238976Sth 	/*
2482b9238976Sth 	 * Now we need to get rid of the ephemeral data if it exists.
2483b9238976Sth 	 */
2484b9238976Sth 	mutex_enter(&mi->mi_lock);
2485b9238976Sth 	if (mi->mi_ephemeral) {
2486b9238976Sth 		/*
2487b9238976Sth 		 * If we are the root node of an ephemeral branch
2488b9238976Sth 		 * which is being removed, then we need to fixup
2489b9238976Sth 		 * pointers into and out of the node.
2490b9238976Sth 		 */
2491b9238976Sth 		if (!(mi->mi_flags & MI4_EPHEMERAL_RECURSED))
2492b9238976Sth 			nfs4_ephemeral_umount_cleanup(mi->mi_ephemeral);
2493b9238976Sth 
24942f172c55SRobert Thurlow 		nfs4_ephemeral_tree_rele(*pnet);
2495b9238976Sth 		ASSERT(mi->mi_ephemeral != NULL);
2496b9238976Sth 
2497b9238976Sth 		kmem_free(mi->mi_ephemeral, sizeof (*mi->mi_ephemeral));
2498b9238976Sth 		mi->mi_ephemeral = NULL;
2499b87f76edSThomas Haynes 		VFS_RELE(mi->mi_vfsp);
2500b87f76edSThomas Haynes 		MI4_RELE(mi);
2501b9238976Sth 	}
2502b9238976Sth 	mutex_exit(&mi->mi_lock);
2503b9238976Sth 
25042f172c55SRobert Thurlow 	nfs4_ephemeral_umount_unlock(pmust_unlock, pnet);
2505b9238976Sth }
2506b9238976Sth 
2507b9238976Sth /*
2508b9238976Sth  * Unmount an ephemeral node.
25092f172c55SRobert Thurlow  *
25102f172c55SRobert Thurlow  * Note that if this code fails, then it must unlock.
25112f172c55SRobert Thurlow  *
25122f172c55SRobert Thurlow  * If it succeeds, then the caller must be prepared to do so.
2513b9238976Sth  */
2514b9238976Sth int
2515b9238976Sth nfs4_ephemeral_umount(mntinfo4_t *mi, int flag, cred_t *cr,
25162f172c55SRobert Thurlow     bool_t *pmust_unlock, nfs4_ephemeral_tree_t **pnet)
2517b9238976Sth {
2518b9238976Sth 	int			error = 0;
2519b9238976Sth 	nfs4_ephemeral_t	*eph;
2520b9238976Sth 	nfs4_ephemeral_tree_t	*net;
2521b9238976Sth 	int			is_derooting = FALSE;
2522b9238976Sth 	int			is_recursed = FALSE;
2523d3a14591SThomas Haynes 	int			was_locked = FALSE;
2524d3a14591SThomas Haynes 
2525d3a14591SThomas Haynes 	/*
2526d3a14591SThomas Haynes 	 * Make sure to set the default state for cleaning
2527d3a14591SThomas Haynes 	 * up the tree in the caller (and on the way out).
2528d3a14591SThomas Haynes 	 */
25292f172c55SRobert Thurlow 	*pmust_unlock = FALSE;
2530b9238976Sth 
2531b9238976Sth 	/*
2532b9238976Sth 	 * The active vnodes on this file system may be ephemeral
2533b9238976Sth 	 * children. We need to check for and try to unmount them
2534b9238976Sth 	 * here. If any can not be unmounted, we are going
2535b9238976Sth 	 * to return EBUSY.
2536b9238976Sth 	 */
2537b9238976Sth 	mutex_enter(&mi->mi_lock);
2538b9238976Sth 
2539b9238976Sth 	/*
2540b9238976Sth 	 * If an ephemeral tree, we need to check to see if
2541b9238976Sth 	 * the lock is already held. If it is, then we need
2542b9238976Sth 	 * to see if we are being called as a result of
2543b9238976Sth 	 * the recursive removal of some node of the tree or
2544b9238976Sth 	 * if we are another attempt to remove the tree.
2545b9238976Sth 	 *
2546b9238976Sth 	 * mi_flags & MI4_EPHEMERAL indicates an ephemeral
2547b9238976Sth 	 * node. mi_ephemeral being non-NULL also does this.
2548b9238976Sth 	 *
2549b9238976Sth 	 * mi_ephemeral_tree being non-NULL is sufficient
2550b9238976Sth 	 * to also indicate either it is an ephemeral node
2551b9238976Sth 	 * or the enclosing mntinfo4.
2552b9238976Sth 	 *
2553b9238976Sth 	 * Do we need MI4_EPHEMERAL? Yes, it is useful for
2554b9238976Sth 	 * when we delete the ephemeral node and need to
2555b9238976Sth 	 * differentiate from an ephemeral node and the
2556b9238976Sth 	 * enclosing root node.
2557b9238976Sth 	 */
2558b9238976Sth 	*pnet = net = mi->mi_ephemeral_tree;
2559eabd0450Sth 	if (net == NULL) {
2560b9238976Sth 		mutex_exit(&mi->mi_lock);
2561eabd0450Sth 		return (0);
2562eabd0450Sth 	}
2563b9238976Sth 
2564eabd0450Sth 	eph = mi->mi_ephemeral;
2565eabd0450Sth 	is_recursed = mi->mi_flags & MI4_EPHEMERAL_RECURSED;
2566eabd0450Sth 	is_derooting = (eph == NULL);
2567b9238976Sth 
25682f172c55SRobert Thurlow 	mutex_enter(&net->net_cnt_lock);
25692f172c55SRobert Thurlow 
2570eabd0450Sth 	/*
2571eabd0450Sth 	 * If this is not recursion, then we need to
25722f172c55SRobert Thurlow 	 * check to see if a harvester thread has
25732f172c55SRobert Thurlow 	 * already grabbed the lock.
2574eabd0450Sth 	 *
25752f172c55SRobert Thurlow 	 * After we exit this branch, we may not
25762f172c55SRobert Thurlow 	 * blindly return, we need to jump to
25772f172c55SRobert Thurlow 	 * is_busy!
2578eabd0450Sth 	 */
2579eabd0450Sth 	if (!is_recursed) {
2580eabd0450Sth 		if (net->net_status &
2581eabd0450Sth 		    NFS4_EPHEMERAL_TREE_LOCKED) {
2582b9238976Sth 			/*
2583d3a14591SThomas Haynes 			 * If the tree is locked, we need
2584d3a14591SThomas Haynes 			 * to decide whether we are the
2585d3a14591SThomas Haynes 			 * harvester or some explicit call
2586d3a14591SThomas Haynes 			 * for a umount. The only way that
2587d3a14591SThomas Haynes 			 * we are the harvester is if
2588d3a14591SThomas Haynes 			 * MS_SYSSPACE is set.
2589d3a14591SThomas Haynes 			 *
2590d3a14591SThomas Haynes 			 * We only let the harvester through
2591d3a14591SThomas Haynes 			 * at this point.
2592eabd0450Sth 			 *
2593eabd0450Sth 			 * We return EBUSY so that the
2594eabd0450Sth 			 * caller knows something is
2595eabd0450Sth 			 * going on. Note that by that
2596eabd0450Sth 			 * time, the umount in the other
2597eabd0450Sth 			 * thread may have already occured.
2598b9238976Sth 			 */
2599d3a14591SThomas Haynes 			if (!(flag & MS_SYSSPACE)) {
2600d3a14591SThomas Haynes 				mutex_exit(&net->net_cnt_lock);
2601d3a14591SThomas Haynes 				mutex_exit(&mi->mi_lock);
2602d3a14591SThomas Haynes 
2603d3a14591SThomas Haynes 				return (EBUSY);
2604d3a14591SThomas Haynes 			}
2605d3a14591SThomas Haynes 
2606d3a14591SThomas Haynes 			was_locked = TRUE;
2607d3a14591SThomas Haynes 		}
2608eabd0450Sth 	}
26092f172c55SRobert Thurlow 
26102f172c55SRobert Thurlow 	mutex_exit(&net->net_cnt_lock);
2611eabd0450Sth 	mutex_exit(&mi->mi_lock);
2612b9238976Sth 
2613eabd0450Sth 	/*
2614d3a14591SThomas Haynes 	 * If we are not the harvester, we need to check
2615d3a14591SThomas Haynes 	 * to see if we need to grab the tree lock.
2616eabd0450Sth 	 */
2617d3a14591SThomas Haynes 	if (was_locked == FALSE) {
2618d3a14591SThomas Haynes 		/*
2619d3a14591SThomas Haynes 		 * If we grab the lock, it means that no other
2620d3a14591SThomas Haynes 		 * operation is working on the tree. If we don't
2621d3a14591SThomas Haynes 		 * grab it, we need to decide if this is because
2622d3a14591SThomas Haynes 		 * we are a recursive call or a new operation.
2623d3a14591SThomas Haynes 		 */
2624d3a14591SThomas Haynes 		if (mutex_tryenter(&net->net_tree_lock)) {
2625d3a14591SThomas Haynes 			*pmust_unlock = TRUE;
2626d3a14591SThomas Haynes 		} else {
2627b9238976Sth 			/*
2628d3a14591SThomas Haynes 			 * If we are a recursive call, we can
2629d3a14591SThomas Haynes 			 * proceed without the lock.
2630d3a14591SThomas Haynes 			 * Otherwise we have to wait until
2631d3a14591SThomas Haynes 			 * the lock becomes free.
2632b9238976Sth 			 */
2633d3a14591SThomas Haynes 			if (!is_recursed) {
2634d3a14591SThomas Haynes 				mutex_enter(&net->net_cnt_lock);
2635d3a14591SThomas Haynes 				if (net->net_status &
2636d3a14591SThomas Haynes 				    (NFS4_EPHEMERAL_TREE_DEROOTING
2637d3a14591SThomas Haynes 				    | NFS4_EPHEMERAL_TREE_INVALID)) {
2638d3a14591SThomas Haynes 					mutex_exit(&net->net_cnt_lock);
2639d3a14591SThomas Haynes 					goto is_busy;
2640d3a14591SThomas Haynes 				}
2641d3a14591SThomas Haynes 				mutex_exit(&net->net_cnt_lock);
2642b9238976Sth 
2643d3a14591SThomas Haynes 				/*
2644d3a14591SThomas Haynes 				 * We can't hold any other locks whilst
2645d3a14591SThomas Haynes 				 * we wait on this to free up.
2646d3a14591SThomas Haynes 				 */
2647d3a14591SThomas Haynes 				mutex_enter(&net->net_tree_lock);
2648b9238976Sth 
2649d3a14591SThomas Haynes 				/*
2650d3a14591SThomas Haynes 				 * Note that while mi->mi_ephemeral
2651d3a14591SThomas Haynes 				 * may change and thus we have to
2652d3a14591SThomas Haynes 				 * update eph, it is the case that
2653d3a14591SThomas Haynes 				 * we have tied down net and
2654d3a14591SThomas Haynes 				 * do not care if mi->mi_ephemeral_tree
2655d3a14591SThomas Haynes 				 * has changed.
2656d3a14591SThomas Haynes 				 */
2657d3a14591SThomas Haynes 				mutex_enter(&mi->mi_lock);
2658d3a14591SThomas Haynes 				eph = mi->mi_ephemeral;
2659d3a14591SThomas Haynes 				mutex_exit(&mi->mi_lock);
2660d3a14591SThomas Haynes 
2661d3a14591SThomas Haynes 				/*
2662d3a14591SThomas Haynes 				 * Okay, we need to see if either the
2663d3a14591SThomas Haynes 				 * tree got nuked or the current node
2664d3a14591SThomas Haynes 				 * got nuked. Both of which will cause
2665d3a14591SThomas Haynes 				 * an error.
2666d3a14591SThomas Haynes 				 *
2667d3a14591SThomas Haynes 				 * Note that a subsequent retry of the
2668d3a14591SThomas Haynes 				 * umount shall work.
2669d3a14591SThomas Haynes 				 */
2670d3a14591SThomas Haynes 				mutex_enter(&net->net_cnt_lock);
2671d3a14591SThomas Haynes 				if (net->net_status &
2672d3a14591SThomas Haynes 				    NFS4_EPHEMERAL_TREE_INVALID ||
2673d3a14591SThomas Haynes 				    (!is_derooting && eph == NULL)) {
2674d3a14591SThomas Haynes 					mutex_exit(&net->net_cnt_lock);
2675d3a14591SThomas Haynes 					mutex_exit(&net->net_tree_lock);
2676d3a14591SThomas Haynes 					goto is_busy;
2677d3a14591SThomas Haynes 				}
2678eabd0450Sth 				mutex_exit(&net->net_cnt_lock);
2679d3a14591SThomas Haynes 				*pmust_unlock = TRUE;
2680eabd0450Sth 			}
2681eabd0450Sth 		}
2682eabd0450Sth 	}
2683eabd0450Sth 
2684eabd0450Sth 	/*
2685eabd0450Sth 	 * Only once we have grabbed the lock can we mark what we
2686eabd0450Sth 	 * are planning on doing to the ephemeral tree.
2687eabd0450Sth 	 */
2688eabd0450Sth 	if (*pmust_unlock) {
2689eabd0450Sth 		mutex_enter(&net->net_cnt_lock);
2690eabd0450Sth 		net->net_status |= NFS4_EPHEMERAL_TREE_UMOUNTING;
2691eabd0450Sth 
2692eabd0450Sth 		/*
2693eabd0450Sth 		 * Check to see if we are nuking the root.
2694eabd0450Sth 		 */
2695eabd0450Sth 		if (is_derooting)
2696eabd0450Sth 			net->net_status |=
2697eabd0450Sth 			    NFS4_EPHEMERAL_TREE_DEROOTING;
2698eabd0450Sth 		mutex_exit(&net->net_cnt_lock);
2699eabd0450Sth 	}
2700eabd0450Sth 
2701eabd0450Sth 	if (!is_derooting) {
2702eabd0450Sth 		/*
2703eabd0450Sth 		 * Only work on children if the caller has not already
2704eabd0450Sth 		 * done so.
2705eabd0450Sth 		 */
2706eabd0450Sth 		if (!is_recursed) {
2707eabd0450Sth 			ASSERT(eph != NULL);
2708eabd0450Sth 
2709eabd0450Sth 			error = nfs4_ephemeral_unmount_engine(eph,
2710eabd0450Sth 			    FALSE, flag, cr);
2711eabd0450Sth 			if (error)
2712eabd0450Sth 				goto is_busy;
2713eabd0450Sth 		}
2714eabd0450Sth 	} else {
2715eabd0450Sth 		eph = net->net_root;
2716eabd0450Sth 
2717eabd0450Sth 		/*
2718eabd0450Sth 		 * Only work if there is something there.
2719eabd0450Sth 		 */
2720eabd0450Sth 		if (eph) {
2721eabd0450Sth 			error = nfs4_ephemeral_unmount_engine(eph, TRUE,
2722eabd0450Sth 			    flag, cr);
2723eabd0450Sth 			if (error) {
2724eabd0450Sth 				mutex_enter(&net->net_cnt_lock);
2725eabd0450Sth 				net->net_status &=
2726eabd0450Sth 				    ~NFS4_EPHEMERAL_TREE_DEROOTING;
2727eabd0450Sth 				mutex_exit(&net->net_cnt_lock);
2728eabd0450Sth 				goto is_busy;
2729eabd0450Sth 			}
2730b9238976Sth 
2731b9238976Sth 			/*
2732eabd0450Sth 			 * Nothing else which goes wrong will
2733eabd0450Sth 			 * invalidate the blowing away of the
2734eabd0450Sth 			 * ephmeral tree.
2735b9238976Sth 			 */
2736eabd0450Sth 			net->net_root = NULL;
2737b9238976Sth 		}
2738eabd0450Sth 
2739eabd0450Sth 		/*
2740eabd0450Sth 		 * We have derooted and we have caused the tree to be
2741d3a14591SThomas Haynes 		 * invalidated.
2742eabd0450Sth 		 */
2743eabd0450Sth 		mutex_enter(&net->net_cnt_lock);
2744eabd0450Sth 		net->net_status &= ~NFS4_EPHEMERAL_TREE_DEROOTING;
2745eabd0450Sth 		net->net_status |= NFS4_EPHEMERAL_TREE_INVALID;
27462f172c55SRobert Thurlow 		DTRACE_NFSV4_1(nfs4clnt__dbg__ephemeral__tree__derooting,
27472f172c55SRobert Thurlow 		    uint_t, net->net_refcnt);
27482f172c55SRobert Thurlow 
27492f172c55SRobert Thurlow 		/*
27502f172c55SRobert Thurlow 		 * We will not finalize this node, so safe to
27512f172c55SRobert Thurlow 		 * release it.
27522f172c55SRobert Thurlow 		 */
27532f172c55SRobert Thurlow 		nfs4_ephemeral_tree_decr(net);
2754eabd0450Sth 		mutex_exit(&net->net_cnt_lock);
2755eabd0450Sth 
2756d3a14591SThomas Haynes 		if (was_locked == FALSE)
2757d3a14591SThomas Haynes 			mutex_exit(&net->net_tree_lock);
2758d3a14591SThomas Haynes 
2759d3a14591SThomas Haynes 		/*
2760d3a14591SThomas Haynes 		 * We have just blown away any notation of this
27612f172c55SRobert Thurlow 		 * tree being locked or having a refcnt.
27622f172c55SRobert Thurlow 		 * We can't let the caller try to clean things up.
2763d3a14591SThomas Haynes 		 */
2764d3a14591SThomas Haynes 		*pmust_unlock = FALSE;
2765d3a14591SThomas Haynes 
2766eabd0450Sth 		/*
2767d708af74SThomas Haynes 		 * At this point, the tree should no longer be
2768d708af74SThomas Haynes 		 * associated with the mntinfo4. We need to pull
2769d708af74SThomas Haynes 		 * it off there and let the harvester take
2770eabd0450Sth 		 * care of it once the refcnt drops.
2771eabd0450Sth 		 */
2772eabd0450Sth 		mutex_enter(&mi->mi_lock);
2773eabd0450Sth 		mi->mi_ephemeral_tree = NULL;
2774b9238976Sth 		mutex_exit(&mi->mi_lock);
2775b9238976Sth 	}
2776b9238976Sth 
2777b9238976Sth 	return (0);
2778b9238976Sth 
2779b9238976Sth is_busy:
2780b9238976Sth 
27812f172c55SRobert Thurlow 	nfs4_ephemeral_umount_unlock(pmust_unlock, pnet);
2782b9238976Sth 
2783b9238976Sth 	return (error);
2784b9238976Sth }
2785b9238976Sth 
2786b9238976Sth /*
2787b9238976Sth  * Do the umount and record any error in the parent.
2788b9238976Sth  */
2789b9238976Sth static void
2790b9238976Sth nfs4_ephemeral_record_umount(vfs_t *vfsp, int flag,
2791b9238976Sth     nfs4_ephemeral_t *e, nfs4_ephemeral_t *prior)
2792b9238976Sth {
2793b9238976Sth 	int	error;
2794b9238976Sth 
2795b87f76edSThomas Haynes 	/*
2796b87f76edSThomas Haynes 	 * Only act on if the fs is still mounted.
2797b87f76edSThomas Haynes 	 */
2798b87f76edSThomas Haynes 	if (vfsp == NULL)
2799b87f76edSThomas Haynes 		return;
2800b87f76edSThomas Haynes 
2801b9238976Sth 	error = umount2_engine(vfsp, flag, kcred, FALSE);
2802b9238976Sth 	if (error) {
2803b9238976Sth 		if (prior) {
2804b9238976Sth 			if (prior->ne_child == e)
2805b9238976Sth 				prior->ne_state |=
2806b9238976Sth 				    NFS4_EPHEMERAL_CHILD_ERROR;
2807b9238976Sth 			else
2808b9238976Sth 				prior->ne_state |=
2809b9238976Sth 				    NFS4_EPHEMERAL_PEER_ERROR;
2810b9238976Sth 		}
2811b9238976Sth 	}
2812b9238976Sth }
2813b9238976Sth 
2814b9238976Sth /*
2815b9238976Sth  * For each tree in the forest (where the forest is in
2816b9238976Sth  * effect all of the ephemeral trees for this zone),
2817b9238976Sth  * scan to see if a node can be unmounted. Note that
2818b9238976Sth  * unlike nfs4_ephemeral_unmount_engine(), we do
2819b9238976Sth  * not process the current node before children or
2820b9238976Sth  * siblings. I.e., if a node can be unmounted, we
2821b9238976Sth  * do not recursively check to see if the nodes
2822b9238976Sth  * hanging off of it can also be unmounted.
2823b9238976Sth  *
2824b9238976Sth  * Instead, we delve down deep to try and remove the
2825b9238976Sth  * children first. Then, because we share code with
2826b9238976Sth  * nfs4_ephemeral_unmount_engine(), we will try
2827b9238976Sth  * them again. This could be a performance issue in
2828b9238976Sth  * the future.
2829b9238976Sth  *
2830b9238976Sth  * Also note that unlike nfs4_ephemeral_unmount_engine(),
2831b9238976Sth  * we do not halt on an error. We will not remove the
2832b9238976Sth  * current node, but we will keep on trying to remove
2833b9238976Sth  * the others.
2834b9238976Sth  *
2835b9238976Sth  * force indicates that we want the unmount to occur
2836b9238976Sth  * even if there is something blocking it.
2837b9238976Sth  *
2838b9238976Sth  * time_check indicates that we want to see if the
2839b9238976Sth  * mount has expired past mount_to or not. Typically
2840b9238976Sth  * we want to do this and only on a shutdown of the
2841b9238976Sth  * zone would we want to ignore the check.
2842b9238976Sth  */
static void
nfs4_ephemeral_harvest_forest(nfs4_trigger_globals_t *ntg,
    bool_t force, bool_t time_check)
{
	nfs4_ephemeral_tree_t	*net;
	nfs4_ephemeral_tree_t	*prev = NULL;
	nfs4_ephemeral_tree_t	*next;
	nfs4_ephemeral_t	*e;
	nfs4_ephemeral_t	*prior;
	time_t			now = gethrestime_sec();

	/*
	 * Trees which became invalid with no other references are
	 * collected here and freed only after the forest lock is
	 * dropped, to keep the main scan moving quickly.
	 */
	nfs4_ephemeral_tree_t	*harvest = NULL;

	int			flag;

	mntinfo4_t		*mi;
	vfs_t			*vfsp;

	if (force)
		flag = MS_FORCE | MS_SYSSPACE;
	else
		flag = MS_SYSSPACE;

	mutex_enter(&ntg->ntg_forest_lock);
	for (net = ntg->ntg_forest; net != NULL; net = next) {
		/* Capture the link now; net may be unchained below. */
		next = net->net_next;

		nfs4_ephemeral_tree_hold(net);

		mutex_enter(&net->net_tree_lock);

		/*
		 * Let the unmount code know that the
		 * tree is already locked!
		 */
		mutex_enter(&net->net_cnt_lock);
		net->net_status |= NFS4_EPHEMERAL_TREE_LOCKED;
		mutex_exit(&net->net_cnt_lock);

		/*
		 * If the intent is force all ephemeral nodes to
		 * be unmounted in this zone, we can short circuit a
		 * lot of tree traversal and simply zap the root node.
		 */
		if (force) {
			if (net->net_root) {
				mi = net->net_root->ne_mount;

				vfsp = mi->mi_vfsp;
				ASSERT(vfsp != NULL);

				/*
				 * Cleared by umount2_engine.
				 */
				VFS_HOLD(vfsp);

				(void) umount2_engine(vfsp, flag,
				    kcred, FALSE);

				goto check_done;
			}
		}

		e = net->net_root;
		if (e)
			e->ne_state = NFS4_EPHEMERAL_VISIT_CHILD;

		/*
		 * Iterative depth-first walk: ne_state serves as a
		 * per-node program counter (visit child, then visit
		 * sibling, then process this node), and ne_prior is
		 * followed back toward the root in place of a
		 * recursion stack.
		 */
		while (e) {
			if (e->ne_state == NFS4_EPHEMERAL_VISIT_CHILD) {
				e->ne_state = NFS4_EPHEMERAL_VISIT_SIBLING;
				if (e->ne_child) {
					e = e->ne_child;
					e->ne_state =
					    NFS4_EPHEMERAL_VISIT_CHILD;
				}

				continue;
			} else if (e->ne_state ==
			    NFS4_EPHEMERAL_VISIT_SIBLING) {
				e->ne_state = NFS4_EPHEMERAL_PROCESS_ME;
				if (e->ne_peer) {
					e = e->ne_peer;
					e->ne_state =
					    NFS4_EPHEMERAL_VISIT_CHILD;
				}

				continue;
			} else if (e->ne_state ==
			    NFS4_EPHEMERAL_CHILD_ERROR) {
				prior = e->ne_prior;

				/*
				 * If a child reported an error, do
				 * not bother trying to unmount.
				 *
				 * If your prior node is a parent,
				 * pass the error up such that they
				 * also do not try to unmount.
				 *
				 * However, if your prior is a sibling,
				 * let them try to unmount if they can.
				 */
				if (prior) {
					if (prior->ne_child == e)
						prior->ne_state |=
						    NFS4_EPHEMERAL_CHILD_ERROR;
					else
						prior->ne_state |=
						    NFS4_EPHEMERAL_PEER_ERROR;
				}

				/*
				 * Clear the error and if needed, process peers.
				 *
				 * Once we mask out the error, we know whether
				 * or not we have to process another node.
				 */
				e->ne_state &= ~NFS4_EPHEMERAL_CHILD_ERROR;
				if (e->ne_state == NFS4_EPHEMERAL_PROCESS_ME)
					e = prior;

				continue;
			} else if (e->ne_state ==
			    NFS4_EPHEMERAL_PEER_ERROR) {
				prior = e->ne_prior;

				if (prior) {
					if (prior->ne_child == e)
						prior->ne_state =
						    NFS4_EPHEMERAL_CHILD_ERROR;
					else
						prior->ne_state =
						    NFS4_EPHEMERAL_PEER_ERROR;
				}

				/*
				 * Clear the error from this node and do the
				 * correct processing.
				 */
				e->ne_state &= ~NFS4_EPHEMERAL_PEER_ERROR;
				continue;
			}

			prior = e->ne_prior;
			e->ne_state = NFS4_EPHEMERAL_OK;

			/*
			 * It must be the case that we need to process
			 * this node: either the caller wants every node
			 * considered (!time_check) or this mount has
			 * been idle past its mount_to timeout.
			 */
			if (!time_check ||
			    now - e->ne_ref_time > e->ne_mount_to) {
				mi = e->ne_mount;
				vfsp = mi->mi_vfsp;

				/*
				 * Cleared by umount2_engine.
				 */
				if (vfsp != NULL)
					VFS_HOLD(vfsp);

				/*
				 * Note that we effectively work down to the
				 * leaf nodes first, try to unmount them,
				 * then work our way back up out of the leaf
				 * nodes.
				 *
				 * Also note that we deal with a lot of
				 * complexity by sharing the work with
				 * the manual unmount code.
				 */
				nfs4_ephemeral_record_umount(vfsp, flag,
				    e, prior);
			}

			e = prior;
		}

check_done:

		/*
		 * At this point we are done processing this tree.
		 *
		 * If the tree is invalid and we were the only reference
		 * to it, then we push it on the local linked list
		 * to remove it at the end. We avoid that action now
		 * to keep the tree processing going along at a fair clip.
		 *
		 * Else, even if we were the only reference, we
		 * allow it to be reused as needed.
		 */
		mutex_enter(&net->net_cnt_lock);
		nfs4_ephemeral_tree_decr(net);
		if (net->net_refcnt == 0 &&
		    net->net_status & NFS4_EPHEMERAL_TREE_INVALID) {
			net->net_status &= ~NFS4_EPHEMERAL_TREE_LOCKED;
			mutex_exit(&net->net_cnt_lock);
			mutex_exit(&net->net_tree_lock);

			/* Unchain the dead tree from the forest. */
			if (prev)
				prev->net_next = net->net_next;
			else
				ntg->ntg_forest = net->net_next;

			net->net_next = harvest;
			harvest = net;

			/* Drop the holds the tree kept on its anchor mount. */
			VFS_RELE(net->net_mount->mi_vfsp);
			MI4_RELE(net->net_mount);

			continue;
		}

		net->net_status &= ~NFS4_EPHEMERAL_TREE_LOCKED;
		mutex_exit(&net->net_cnt_lock);
		mutex_exit(&net->net_tree_lock);

		prev = net;
	}
	mutex_exit(&ntg->ntg_forest_lock);

	/*
	 * Now that no locks are held, tear down the trees we
	 * collected above.
	 */
	for (net = harvest; net != NULL; net = next) {
		next = net->net_next;

		mutex_destroy(&net->net_tree_lock);
		mutex_destroy(&net->net_cnt_lock);
		kmem_free(net, sizeof (*net));
	}
}
3072b9238976Sth 
3073b9238976Sth /*
3074b9238976Sth  * This is the thread which decides when the harvesting
3075b9238976Sth  * can proceed and when to kill it off for this zone.
3076b9238976Sth  */
3077b9238976Sth static void
3078b9238976Sth nfs4_ephemeral_harvester(nfs4_trigger_globals_t *ntg)
3079b9238976Sth {
3080b9238976Sth 	clock_t		timeleft;
3081b9238976Sth 	zone_t		*zone = curproc->p_zone;
3082b9238976Sth 
3083b9238976Sth 	for (;;) {
3084d3d50737SRafael Vanoni 		timeleft = zone_status_timedwait(zone, ddi_get_lbolt() +
3085b9238976Sth 		    nfs4_trigger_thread_timer * hz, ZONE_IS_SHUTTING_DOWN);
3086b9238976Sth 
3087b9238976Sth 		/*
3088b9238976Sth 		 * zone is exiting...
3089b9238976Sth 		 */
3090b9238976Sth 		if (timeleft != -1) {
3091b9238976Sth 			ASSERT(zone_status_get(zone) >= ZONE_IS_SHUTTING_DOWN);
3092b9238976Sth 			zthread_exit();
3093b9238976Sth 			/* NOTREACHED */
3094b9238976Sth 		}
3095b9238976Sth 
3096b9238976Sth 		/*
3097b9238976Sth 		 * Only bother scanning if there is potential
3098b9238976Sth 		 * work to be done.
3099b9238976Sth 		 */
3100b9238976Sth 		if (ntg->ntg_forest == NULL)
3101b9238976Sth 			continue;
3102b9238976Sth 
3103b9238976Sth 		/*
3104b9238976Sth 		 * Now scan the list and get rid of everything which
3105b9238976Sth 		 * is old.
3106b9238976Sth 		 */
3107b9238976Sth 		nfs4_ephemeral_harvest_forest(ntg, FALSE, TRUE);
3108b9238976Sth 	}
3109b9238976Sth 
3110b9238976Sth 	/* NOTREACHED */
3111b9238976Sth }
3112b9238976Sth 
3113b9238976Sth /*
3114b9238976Sth  * The zone specific glue needed to start the unmount harvester.
3115b9238976Sth  *
3116b9238976Sth  * Note that we want to avoid holding the mutex as long as possible,
3117b9238976Sth  * hence the multiple checks.
3118b9238976Sth  *
3119b9238976Sth  * The caller should avoid us getting down here in the first
3120b9238976Sth  * place.
3121b9238976Sth  */
3122b9238976Sth static void
3123b9238976Sth nfs4_ephemeral_start_harvester(nfs4_trigger_globals_t *ntg)
3124b9238976Sth {
3125b9238976Sth 	/*
3126b9238976Sth 	 * It got started before we got here...
3127b9238976Sth 	 */
3128b9238976Sth 	if (ntg->ntg_thread_started)
3129b9238976Sth 		return;
3130b9238976Sth 
3131b9238976Sth 	mutex_enter(&nfs4_ephemeral_thread_lock);
3132b9238976Sth 
3133b9238976Sth 	if (ntg->ntg_thread_started) {
3134b9238976Sth 		mutex_exit(&nfs4_ephemeral_thread_lock);
3135b9238976Sth 		return;
3136b9238976Sth 	}
3137b9238976Sth 
3138b9238976Sth 	/*
3139b9238976Sth 	 * Start the unmounter harvester thread for this zone.
3140b9238976Sth 	 */
3141b9238976Sth 	(void) zthread_create(NULL, 0, nfs4_ephemeral_harvester,
3142b9238976Sth 	    ntg, 0, minclsyspri);
3143b9238976Sth 
3144b9238976Sth 	ntg->ntg_thread_started = TRUE;
3145b9238976Sth 	mutex_exit(&nfs4_ephemeral_thread_lock);
3146b9238976Sth }
3147b9238976Sth 
3148b9238976Sth /*ARGSUSED*/
3149b9238976Sth static void *
3150b9238976Sth nfs4_ephemeral_zsd_create(zoneid_t zoneid)
3151b9238976Sth {
3152b9238976Sth 	nfs4_trigger_globals_t	*ntg;
3153b9238976Sth 
3154b9238976Sth 	ntg = kmem_zalloc(sizeof (*ntg), KM_SLEEP);
3155b9238976Sth 	ntg->ntg_thread_started = FALSE;
3156b9238976Sth 
3157b9238976Sth 	/*
3158b9238976Sth 	 * This is the default....
3159b9238976Sth 	 */
3160b9238976Sth 	ntg->ntg_mount_to = nfs4_trigger_thread_timer;
3161b9238976Sth 
3162b9238976Sth 	mutex_init(&ntg->ntg_forest_lock, NULL,
3163b9238976Sth 	    MUTEX_DEFAULT, NULL);
3164b9238976Sth 
3165b9238976Sth 	return (ntg);
3166b9238976Sth }
3167b9238976Sth 
3168b9238976Sth /*
3169b9238976Sth  * Try a nice gentle walk down the forest and convince
3170b9238976Sth  * all of the trees to gracefully give it up.
3171b9238976Sth  */
3172b9238976Sth /*ARGSUSED*/
3173b9238976Sth static void
3174b9238976Sth nfs4_ephemeral_zsd_shutdown(zoneid_t zoneid, void *arg)
3175b9238976Sth {
3176b9238976Sth 	nfs4_trigger_globals_t	*ntg = arg;
3177b9238976Sth 
3178b9238976Sth 	if (!ntg)
3179b9238976Sth 		return;
3180b9238976Sth 
3181b9238976Sth 	nfs4_ephemeral_harvest_forest(ntg, FALSE, FALSE);
3182b9238976Sth }
3183b9238976Sth 
3184b9238976Sth /*
3185b9238976Sth  * Race along the forest and rip all of the trees out by
3186b9238976Sth  * their rootballs!
3187b9238976Sth  */
3188b9238976Sth /*ARGSUSED*/
3189b9238976Sth static void
3190b9238976Sth nfs4_ephemeral_zsd_destroy(zoneid_t zoneid, void *arg)
3191b9238976Sth {
3192b9238976Sth 	nfs4_trigger_globals_t	*ntg = arg;
3193b9238976Sth 
3194b9238976Sth 	if (!ntg)
3195b9238976Sth 		return;
3196b9238976Sth 
3197b9238976Sth 	nfs4_ephemeral_harvest_forest(ntg, TRUE, FALSE);
3198b9238976Sth 
3199b9238976Sth 	mutex_destroy(&ntg->ntg_forest_lock);
3200b9238976Sth 	kmem_free(ntg, sizeof (*ntg));
3201b9238976Sth }
3202b9238976Sth 
/*
 * This is the zone independent cleanup needed for
 * ephemeral mount processing.
 */
3207b9238976Sth void
3208b9238976Sth nfs4_ephemeral_fini(void)
3209b9238976Sth {
3210b9238976Sth 	(void) zone_key_delete(nfs4_ephemeral_key);
3211b9238976Sth 	mutex_destroy(&nfs4_ephemeral_thread_lock);
3212b9238976Sth }
3213b9238976Sth 
/*
 * This is the zone independent initialization needed for
 * ephemeral mount processing.
 */
3218b9238976Sth void
3219b9238976Sth nfs4_ephemeral_init(void)
3220b9238976Sth {
3221b9238976Sth 	mutex_init(&nfs4_ephemeral_thread_lock, NULL, MUTEX_DEFAULT,
3222b9238976Sth 	    NULL);
3223b9238976Sth 
3224b9238976Sth 	zone_key_create(&nfs4_ephemeral_key, nfs4_ephemeral_zsd_create,
3225b9238976Sth 	    nfs4_ephemeral_zsd_shutdown, nfs4_ephemeral_zsd_destroy);
3226b9238976Sth }
3227b9238976Sth 
3228b9238976Sth /*
3229b9238976Sth  * nfssys() calls this function to set the per-zone
3230b9238976Sth  * value of mount_to to drive when an ephemeral mount is
3231b9238976Sth  * timed out. Each mount will grab a copy of this value
3232b9238976Sth  * when mounted.
3233b9238976Sth  */
3234b9238976Sth void
3235b9238976Sth nfs4_ephemeral_set_mount_to(uint_t mount_to)
3236b9238976Sth {
3237b9238976Sth 	nfs4_trigger_globals_t	*ntg;
3238b9238976Sth 	zone_t			*zone = curproc->p_zone;
3239b9238976Sth 
3240b9238976Sth 	ntg = zone_getspecific(nfs4_ephemeral_key, zone);
3241b9238976Sth 
3242b9238976Sth 	ntg->ntg_mount_to = mount_to;
3243b9238976Sth }
3244b9238976Sth 
3245b9238976Sth /*
3246b9238976Sth  * Walk the list of v4 mount options; if they are currently set in vfsp,
3247b9238976Sth  * append them to a new comma-separated mount option string, and return it.
3248b9238976Sth  *
3249b9238976Sth  * Caller should free by calling nfs4_trigger_destroy_mntopts().
3250b9238976Sth  */
3251b9238976Sth static char *
3252b9238976Sth nfs4_trigger_create_mntopts(vfs_t *vfsp)
3253b9238976Sth {
3254b9238976Sth 	uint_t i;
3255b9238976Sth 	char *mntopts;
3256b9238976Sth 	struct vfssw *vswp;
3257b9238976Sth 	mntopts_t *optproto;
3258b9238976Sth 
3259b9238976Sth 	mntopts = kmem_zalloc(MAX_MNTOPT_STR, KM_SLEEP);
3260b9238976Sth 
3261b9238976Sth 	/* get the list of applicable mount options for v4; locks *vswp */
3262b9238976Sth 	vswp = vfs_getvfssw(MNTTYPE_NFS4);
3263b9238976Sth 	optproto = &vswp->vsw_optproto;
3264b9238976Sth 
3265b9238976Sth 	for (i = 0; i < optproto->mo_count; i++) {
3266b9238976Sth 		struct mntopt *mop = &optproto->mo_list[i];
3267b9238976Sth 
3268b9238976Sth 		if (mop->mo_flags & MO_EMPTY)
3269b9238976Sth 			continue;
3270b9238976Sth 
3271b9238976Sth 		if (nfs4_trigger_add_mntopt(mntopts, mop->mo_name, vfsp)) {
3272b9238976Sth 			kmem_free(mntopts, MAX_MNTOPT_STR);
3273b9238976Sth 			vfs_unrefvfssw(vswp);
3274b9238976Sth 			return (NULL);
3275b9238976Sth 		}
3276b9238976Sth 	}
3277b9238976Sth 
3278b9238976Sth 	vfs_unrefvfssw(vswp);
3279b9238976Sth 
3280b9238976Sth 	/*
3281b9238976Sth 	 * MNTOPT_XATTR is not in the v4 mount opt proto list,
3282b9238976Sth 	 * and it may only be passed via MS_OPTIONSTR, so we
3283b9238976Sth 	 * must handle it here.
3284b9238976Sth 	 *
3285b9238976Sth 	 * Ideally, it would be in the list, but NFS does not specify its
3286b9238976Sth 	 * own opt proto list, it uses instead the default one. Since
3287b9238976Sth 	 * not all filesystems support extended attrs, it would not be
3288b9238976Sth 	 * appropriate to add it there.
3289b9238976Sth 	 */
3290b9238976Sth 	if (nfs4_trigger_add_mntopt(mntopts, MNTOPT_XATTR, vfsp) ||
3291b9238976Sth 	    nfs4_trigger_add_mntopt(mntopts, MNTOPT_NOXATTR, vfsp)) {
3292b9238976Sth 		kmem_free(mntopts, MAX_MNTOPT_STR);
3293b9238976Sth 		return (NULL);
3294b9238976Sth 	}
3295b9238976Sth 
3296b9238976Sth 	return (mntopts);
3297b9238976Sth }
3298b9238976Sth 
3299b9238976Sth static void
3300b9238976Sth nfs4_trigger_destroy_mntopts(char *mntopts)
3301b9238976Sth {
3302b9238976Sth 	if (mntopts)
3303b9238976Sth 		kmem_free(mntopts, MAX_MNTOPT_STR);
3304b9238976Sth }
3305b9238976Sth 
3306b9238976Sth /*
3307b9238976Sth  * Check a single mount option (optname). Add to mntopts if it is set in VFS.
3308b9238976Sth  */
3309b9238976Sth static int
3310b9238976Sth nfs4_trigger_add_mntopt(char *mntopts, char *optname, vfs_t *vfsp)
3311b9238976Sth {
3312b9238976Sth 	if (mntopts == NULL || optname == NULL || vfsp == NULL)
3313b9238976Sth 		return (EINVAL);
3314b9238976Sth 
3315b9238976Sth 	if (vfs_optionisset(vfsp, optname, NULL)) {
3316b9238976Sth 		size_t mntoptslen = strlen(mntopts);
3317b9238976Sth 		size_t optnamelen = strlen(optname);
3318b9238976Sth 
3319b9238976Sth 		/* +1 for ',', +1 for NUL */
3320b9238976Sth 		if (mntoptslen + optnamelen + 2 > MAX_MNTOPT_STR)
3321b9238976Sth 			return (EOVERFLOW);
3322b9238976Sth 
3323b9238976Sth 		/* first or subsequent mount option? */
3324b9238976Sth 		if (*mntopts != '\0')
3325b9238976Sth 			(void) strcat(mntopts, ",");
3326b9238976Sth 
3327b9238976Sth 		(void) strcat(mntopts, optname);
3328b9238976Sth 	}
3329b9238976Sth 
3330b9238976Sth 	return (0);
3331b9238976Sth }
3332b9238976Sth 
3333b9238976Sth static enum clnt_stat
33342f172c55SRobert Thurlow nfs4_ping_server_common(struct knetconfig *knc, struct netbuf *addr, int nointr)
3335b9238976Sth {
33362f172c55SRobert Thurlow 	int retries;
3337b9238976Sth 	uint_t max_msgsize;
3338b9238976Sth 	enum clnt_stat status;
3339b9238976Sth 	CLIENT *cl;
3340b9238976Sth 	struct timeval timeout;
3341b9238976Sth 
3342b9238976Sth 	/* as per recov_newserver() */
3343b9238976Sth 	max_msgsize = 0;
3344b9238976Sth 	retries = 1;
3345b9238976Sth 	timeout.tv_sec = 2;
3346b9238976Sth 	timeout.tv_usec = 0;
3347b9238976Sth 
33482f172c55SRobert Thurlow 	if (clnt_tli_kcreate(knc, addr, NFS_PROGRAM, NFS_V4,
33492f172c55SRobert Thurlow 	    max_msgsize, retries, CRED(), &cl) != 0)
3350b9238976Sth 		return (RPC_FAILED);
3351b9238976Sth 
3352b9238976Sth 	if (nointr)
3353b9238976Sth 		cl->cl_nosignal = TRUE;
3354b9238976Sth 	status = CLNT_CALL(cl, RFS_NULL, xdr_void, NULL, xdr_void, NULL,
3355b9238976Sth 	    timeout);
3356b9238976Sth 	if (nointr)
3357b9238976Sth 		cl->cl_nosignal = FALSE;
3358b9238976Sth 
3359b9238976Sth 	AUTH_DESTROY(cl->cl_auth);
3360b9238976Sth 	CLNT_DESTROY(cl);
3361b9238976Sth 
3362b9238976Sth 	return (status);
3363b9238976Sth }
33642f172c55SRobert Thurlow 
33652f172c55SRobert Thurlow static enum clnt_stat
33662f172c55SRobert Thurlow nfs4_trigger_ping_server(servinfo4_t *svp, int nointr)
33672f172c55SRobert Thurlow {
33682f172c55SRobert Thurlow 	return (nfs4_ping_server_common(svp->sv_knconf, &svp->sv_addr, nointr));
33692f172c55SRobert Thurlow }
3370