/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smbfs_vfsops.c,v 1.73.64.1 2005/05/27 02:35:28 lindak Exp $
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/systm.h>
#include <sys/cred.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <fs/fs_subr.h>
#include <sys/sysmacros.h>
#include <sys/kmem.h>
#include <sys/mkdev.h>
#include <sys/mount.h>
#include <sys/statvfs.h>
#include <sys/errno.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/modctl.h>
#include <sys/policy.h>
#include <sys/atomic.h>
#include <sys/zone.h>
#include <sys/vfs_opreg.h>
#include <sys/mntent.h>
#include <sys/priv.h>
#include <sys/tsol/label.h>
#include <sys/tsol/tndb.h>
#include <inet/ip.h>

#include <netsmb/smb_osdep.h>
#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_dev.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

/*
 * Local function definitions.
 */
int		smbfsinit(int fstyp, char *name);
void		smbfsfini();
static int	smbfs_mount_label_policy(vfs_t *, void *, int, cred_t *);

static vfsdef_t vfw = {
	VFSDEF_VERSION,
	"smbfs",		/* type name string */
	smbfsinit,		/* init routine */
	VSW_NOTZONESAFE,	/* flags */
	NULL			/* mount options table prototype */
};

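/*
 * Loadable module linkage: a file system module,
 * handed to the kernel by _init() below via mod_install().
 */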
static struct modlfs modlfs = {
	&mod_fsops,
	"SMBFS filesystem v" SMBFS_VER_STR,
	&vfw
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlfs, NULL
};

/*
 * Mutex to protect the following variables:
 *	  smbfs_major
 *	  smbfs_minor
 */
extern	kmutex_t	smbfs_minor_lock;
extern	int		smbfs_major;
extern	int		smbfs_minor;

/*
 * Prevent unloads while we have mounts
 */
uint32_t	smbfs_mountcount;

/*
 * smbfs vfs operations.
 */
static int	smbfs_mount(vfs_t *, vnode_t *, struct mounta *, cred_t *);
static int	smbfs_unmount(vfs_t *, int, cred_t *);
static int	smbfs_root(vfs_t *, vnode_t **);
static int	smbfs_statvfs(vfs_t *, statvfs64_t *);
static int	smbfs_sync(vfs_t *, short, cred_t *);
static void	smbfs_freevfs(vfs_t *);

/*
 * Module loading
 */

/*
 * This routine is invoked automatically when the kernel module
 * containing this routine is loaded.  This allows module specific
 * initialization to be done when the module is loaded.
 */
int
_init(void)
{
	int		status;

	/*
	 * Check compiled-in version of "nsmb"
	 * that we're linked with.  (paranoid)
	 */
	if (nsmb_version != NSMB_VERSION) {
		cmn_err(CE_WARN, "_init: nsmb version mismatch");
		return (ENOTTY);
	}

	smbfs_mountcount = 0;

	if ((status = smbfs_clntinit()) != 0) {
		cmn_err(CE_WARN, "_init: smbfs_clntinit failed");
		return (status);
	}

	status = mod_install((struct modlinkage *)&modlinkage);
	return (status);
}

/*
 * Free kernel module resources that were allocated in _init
 * and remove the linkage information into the kernel
 */
int
_fini(void)
{
	int	error;

	/*
	 * If a forcibly unmounted instance is still hanging around,
	 * we cannot allow the module to be unloaded because that would
	 * cause panics once the VFS framework decides it's time to call
	 * into VFS_FREEVFS().
	 */
	if (smbfs_mountcount)
		return (EBUSY);

	error = mod_remove(&modlinkage);
	if (error)
		return (error);

	/*
	 * Free the allocated smbnodes, etc.
	 */
	smbfs_clntfini();

	/*
	 * Free the ops vectors
	 */
	smbfsfini();
	return (0);
}

/*
 * Return information about the module
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info((struct modlinkage *)&modlinkage, modinfop));
}

/*
 * Initialize the vfs structure
 */

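/*
 * File system type index assigned by the VFS framework,
 * and our vfs ops vector (created in smbfsinit() below,
 * freed in smbfsfini()).
 */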
int smbfsfstyp;
vfsops_t *smbfs_vfsops = NULL;

static const fs_operation_def_t smbfs_vfsops_template[] = {
	{ VFSNAME_MOUNT, { .vfs_mount = smbfs_mount } },
	{ VFSNAME_UNMOUNT, { .vfs_unmount = smbfs_unmount } },
	{ VFSNAME_ROOT,	{ .vfs_root = smbfs_root } },
	{ VFSNAME_STATVFS, { .vfs_statvfs = smbfs_statvfs } },
	{ VFSNAME_SYNC,	{ .vfs_sync = smbfs_sync } },
	{ VFSNAME_VGET,	{ .error = fs_nosys } },
	{ VFSNAME_MOUNTROOT, { .error = fs_nosys } },
	{ VFSNAME_FREEVFS, { .vfs_freevfs = smbfs_freevfs } },
	{ NULL, NULL }
};

int
smbfsinit(int fstyp, char *name)
{
	int		error;

	error = vfs_setfsops(fstyp, smbfs_vfsops_template, &smbfs_vfsops);
	if (error != 0) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfsinit: bad vfs ops template");
		return (error);
	}

	error = vn_make_ops(name, smbfs_vnodeops_template, &smbfs_vnodeops);
	if (error != 0) {
		(void) vfs_freevfsops_by_type(fstyp);
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfsinit: bad vnode ops template");
		return (error);
	}

	smbfsfstyp = fstyp;

	return (0);
}

void
smbfsfini()
{
	if (smbfs_vfsops) {
		(void) vfs_freevfsops_by_type(smbfsfstyp);
		smbfs_vfsops = NULL;
	}
	if (smbfs_vnodeops) {
		vn_freevnodeops(smbfs_vnodeops);
		smbfs_vnodeops = NULL;
	}
}

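/*
 * Free a mount info record: remove it from the
 * per-zone list, then free the memory.  Safe to
 * call with a NULL pointer.
 */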
void
smbfs_free_smi(smbmntinfo_t *smi)
{
	if (smi) {
		smbfs_zonelist_remove(smi);
		kmem_free(smi, sizeof (smbmntinfo_t));
	}
}

/*
 * smbfs mount vfsop
 * Set up mount info record and attach it to vfs struct.
 */
static int
smbfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	char		*data = uap->dataptr;
	int		error;
	vnode_t		*rtvp = NULL;	/* root of this fs */
	smbmntinfo_t	*smi = NULL;
	dev_t		smbfs_dev;
	int		version;
	int		devfd;
	zone_t		*zone = curproc->p_zone;
	zone_t		*mntzone = NULL;
	smb_share_t	*ssp = NULL;
	smb_cred_t	scred;

	STRUCT_DECL(smbfs_args, args);		/* smbfs mount arguments */

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * Get the mount arguments.
	 *
	 * uap->datalen might differ from sizeof (args) when
	 * the mount program was built against an older, but
	 * compatible, version of the arguments structure.
	 */
	STRUCT_INIT(args, get_udatamodel());
	bzero(STRUCT_BUF(args), SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE));
	if (copyin(data, STRUCT_BUF(args), MIN(uap->datalen,
	    SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE))))
		return (EFAULT);

	/*
	 * Check mount program version
	 */
	version = STRUCT_FGET(args, version);
	if (version != SMBFS_VERSION) {
		cmn_err(CE_WARN, "mount version mismatch:"
		    " kernel=%d, mount=%d\n",
		    SMBFS_VERSION, version);
		return (EINVAL);
	}

	if (uap->flags & MS_REMOUNT) {
		cmn_err(CE_WARN, "MS_REMOUNT not implemented");
		return (ENOTSUP);
	}

	/*
	 * Check for busy: unless this is an overlay mount,
	 * the mount point must not be in use and must not
	 * be the root of another file system.
	 */
	mutex_enter(&mvp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get the "share" from the netsmb driver (ssp).
	 * It is returned with a "ref" (hold) for us.
	 * Release this hold: at errout below, or in
	 * smbfs_freevfs().
	 */
	devfd = STRUCT_FGET(args, devfd);
	error = smb_dev2share(devfd, &ssp);
	if (error) {
		cmn_err(CE_WARN, "invalid device handle %d (%d)\n",
		    devfd, error);
		return (error);
	}

	/*
	 * We don't have data structures to support multiple mounts of
	 * the same share object by the same owner, so don't allow it.
	 */
	if (ssp->ss_mount != NULL) {
		smb_share_rele(ssp);
		return (EBUSY);
	}

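	/*
	 * Wrap the caller's credentials for use by the SMB
	 * requests made below (e.g. smbfs_smb_qfsattr).
	 * Released via smb_credrele() on all paths out.
	 */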
	smb_credinit(&scred, curproc, cr);

	/*
	 * Use "goto errout" from here on.
	 * See: ssp, smi, rtvp, mntzone
	 */

	/*
	 * Determine the zone we're being mounted into.
	 * Start by assuming the caller's zone; a thread in
	 * the global zone must also verify that the mount
	 * point does not reside inside a non-global zone.
	 */
	zone_hold(mntzone = zone);		/* start with this assumption */
	if (getzoneid() == GLOBAL_ZONEID) {
		zone_rele(mntzone);
		mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
		ASSERT(mntzone != NULL);
		if (mntzone != zone) {
			error = EBUSY;
			goto errout;
		}
	}

	/*
	 * Stop the mount from going any further if the zone is going away.
	 */
	if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
		error = EBUSY;
		goto errout;
	}

	/*
	 * On a Trusted Extensions client, we may have to force read-only
	 * for read-down mounts.
	 */
	if (is_system_labeled()) {
		void *addr;
		int ipvers = 0;
		struct smb_vc *vcp;

		vcp = SSTOVC(ssp);
		addr = smb_vc_getipaddr(vcp, &ipvers);
		error = smbfs_mount_label_policy(vfsp, addr, ipvers, cr);

		if (error > 0)
			goto errout;

		if (error == -1) {
			/* change mount to read-only to prevent write-down */
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
		}
	}

	/*
	 * Get root vnode.
	 */
proceed:

	/*
	 * Create a mount record and link it to the vfs struct.
	 * Compare with NFS: nfsrootvp()
	 */
	smi = kmem_zalloc(sizeof (smbmntinfo_t), KM_SLEEP);

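	/*
	 * Cross-link the share and the mount.  A non-NULL
	 * ss_mount is also how the check above detects a
	 * share that is already mounted.
	 */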
	smi->smi_share	= ssp;
	ssp->ss_mount	= smi;
	smi->smi_zone	= mntzone;

	/*
	 * XXX If not root, get uid/gid from the covered vnode.
	 */
	smi->smi_args.dir_mode	= STRUCT_FGET(args, dir_mode);
	smi->smi_args.file_mode = STRUCT_FGET(args, file_mode);
	smi->smi_args.uid	= STRUCT_FGET(args, uid);
	smi->smi_args.gid	= STRUCT_FGET(args, gid);

	error = smbfs_smb_qfsattr(ssp, &smi->smi_fsattr, &scred);
	if (error) {
		SMBVDEBUG("smbfs_smb_qfsattr error %d\n", error);
	}

#ifdef NOT_YET
	/* Once acls are implemented, remove the ifdefs */
	else if (smbfs_aclsflunksniff(smi, &scred)) {
		mutex_enter(&smi->smi_lock);
		smi->smi_fsattr &= ~FILE_PERSISTENT_ACLS;
		mutex_exit(&smi->smi_lock);
	}
#endif /* NOT_YET */

	/*
	 * Assign a unique device id to the mount
	 */
	mutex_enter(&smbfs_minor_lock);
	do {
		smbfs_minor = (smbfs_minor + 1) & MAXMIN32;
		smbfs_dev = makedevice(smbfs_major, smbfs_minor);
	} while (vfs_devismounted(smbfs_dev));
	mutex_exit(&smbfs_minor_lock);

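	/*
	 * Fill in the vfs fields: the new device id and
	 * fsid, our private data, fs type index, and
	 * block size.
	 */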
	vfsp->vfs_dev	= smbfs_dev;
	vfs_make_fsid(&vfsp->vfs_fsid, smbfs_dev, smbfsfstyp);
	vfsp->vfs_data	= (caddr_t)smi;
	vfsp->vfs_fstype = smbfsfstyp;
	vfsp->vfs_bsize = MAXBSIZE;
	vfsp->vfs_bcount = 0;

	smi->smi_flags	= SMI_INT | SMI_LLOCK;
	smi->smi_vfsp	= vfsp;
	smbfs_zonelist_add(smi);

	/*
	 * Create the root vnode, which we need in unmount
	 * for the call to smb_check_table(), etc.
	 */
	rtvp = smbfs_make_node(vfsp, "\\", 1, NULL, 0, NULL);
	if (!rtvp) {
		cmn_err(CE_WARN, "smbfs_mount: make_node failed\n");
		return (ENOENT);
	}
	rtvp->v_type = VDIR;
	rtvp->v_flag |= VROOT;

	/*
	 * Could get attributes here, but that can wait
	 * until someone does a getattr call.
	 *
	 * NFS does other stuff here too:
	 *   async worker threads
	 *   init kstats
	 *
	 * End of code from NFS nfsrootvp()
	 */

	smb_credrele(&scred);

	smi->smi_root = VTOSMB(rtvp);

	atomic_inc_32(&smbfs_mountcount);

	return (0);

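	/*
	 * Error cleanup: undo what we did above, releasing
	 * in reverse order of acquisition.
	 */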
errout:

	ASSERT(rtvp == NULL);

	vfsp->vfs_data = NULL;
	if (smi)
		smbfs_free_smi(smi);

	if (mntzone != NULL)
		zone_rele(mntzone);

	if (ssp)
		smb_share_rele(ssp);

	smb_credrele(&scred);

	/* args, if we allocated */

	return (error);
}

/*
 * vfs operations
 */
static int
smbfs_unmount(vfs_t *vfsp, int flag, cred_t *cr)
{
	smbmntinfo_t	*smi;
	smbnode_t	*rtnp;

	smi = VFTOSMI(vfsp);

	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	if ((flag & MS_FORCE) == 0) {
#ifdef APPLE
		smbfs_rflush(vfsp, cr);
#endif

		/*
		 * If there are any active vnodes on this file system,
		 * (other than the root vnode) then the file system is
		 * busy and can't be umounted.
		 */
		if (smb_check_table(vfsp, smi->smi_root))
			return (EBUSY);

		/*
		 * We normally hold a ref to the root vnode, so
		 * check for references beyond the one we expect:
		 *   smbmntinfo_t -> smi_root
		 * Note that NFS does not hold the root vnode.
		 */
		if (smi->smi_root &&
		    smi->smi_root->r_vnode->v_count > 1)
			return (EBUSY);
	}

	/*
	 * common code for both forced and non-forced
	 *
	 * Setting VFS_UNMOUNTED prevents new operations.
	 * Operations already underway may continue,
	 * but not for long.
	 */
	vfsp->vfs_flag |= VFS_UNMOUNTED;

	/*
	 * Shutdown any outstanding I/O requests on this share,
	 * and force a tree disconnect.  The share object will
	 * continue to hang around until smb_share_rele().
	 * This should also cause most active nodes to be
	 * released as their operations fail with EIO.
	 */
	smb_share_kill(smi->smi_share);

	/*
	 * If we hold the root VP (and we normally do)
	 * then it's safe to release it now.
	 */
	if (smi->smi_root) {
		rtnp = smi->smi_root;
		smi->smi_root = NULL;
		VN_RELE(rtnp->r_vnode);	/* release root vnode */
	}

	/*
	 * Remove all nodes from the node hash tables.
	 * This (indirectly) calls: smb_addfree, smbinactive,
	 * which will try to flush dirty pages, etc. so
	 * don't destroy the underlying share just yet.
	 *
	 * Also, with a forced unmount, some nodes may
	 * remain active, and those will get cleaned up
	 * after their last vn_rele.
	 */
	smbfs_destroy_table(vfsp);

	/*
	 * Delete our kstats...
	 *
	 * Doing it here, rather than waiting until
	 * smbfs_freevfs(), means they are not visible
	 * after the unmount.
	 */
	if (smi->smi_io_kstats) {
		kstat_delete(smi->smi_io_kstats);
		smi->smi_io_kstats = NULL;
	}
	if (smi->smi_ro_kstats) {
		kstat_delete(smi->smi_ro_kstats);
		smi->smi_ro_kstats = NULL;
	}

	/*
	 * Note: the smb_share_rele()
	 * happens in smbfs_freevfs()
	 */

	return (0);
}


/*
 * find root of smbfs
 */
static int
smbfs_root(vfs_t *vfsp, vnode_t **vpp)
{
	smbmntinfo_t	*smi;
	vnode_t		*vp;

	smi = VFTOSMI(vfsp);

	if (curproc->p_zone != smi->smi_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/*
	 * The root vp is created in mount and held
	 * until unmount, so this is paranoia.
	 */
	if (smi->smi_root == NULL)
		return (EIO);

	/* Just take a reference and return it. */
	vp = SMBTOV(smi->smi_root);
	VN_HOLD(vp);
	*vpp = vp;

	return (0);
}

/*
 * Get file system statistics.
 */
static int
smbfs_statvfs(vfs_t *vfsp, statvfs64_t *sbp)
{
	int		error;
	smbmntinfo_t	*smi = VFTOSMI(vfsp);
	smb_share_t	*ssp = smi->smi_share;
	statvfs64_t	stvfs;
	hrtime_t	now;
	smb_cred_t	scred;

	if (curproc->p_zone != smi->smi_zone)
		return (EPERM);

	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	mutex_enter(&smi->smi_lock);

	/*
	 * Use cached result if still valid.
	 */
recheck:
	now = gethrtime();
	if (now < smi->smi_statfstime) {
		goto cache_hit;
	}

	/*
	 * FS attributes are stale, so someone
	 * needs to do an OTW call to get them.
	 * Serialize here so only one thread
	 * does the OTW call.
	 */
	if (smi->smi_status & SM_STATUS_STATFS_BUSY) {
		smi->smi_status |= SM_STATUS_STATFS_WANT;
		if (!cv_wait_sig(&smi->smi_statvfs_cv, &smi->smi_lock)) {
			mutex_exit(&smi->smi_lock);
			return (EINTR);
		}
		/* Hope status is valid now. */
		goto recheck;
	}
	smi->smi_status |= SM_STATUS_STATFS_BUSY;
	mutex_exit(&smi->smi_lock);

	/*
	 * Do the OTW call.  Note: lock NOT held.
	 */
	smb_credinit(&scred, curproc, NULL);
	bzero(&stvfs, sizeof (stvfs));
	error = smbfs_smb_statfs(ssp, &stvfs, &scred);
	smb_credrele(&scred);

	mutex_enter(&smi->smi_lock);
	if (smi->smi_status & SM_STATUS_STATFS_WANT)
		cv_broadcast(&smi->smi_statvfs_cv);
	smi->smi_status &= ~(SM_STATUS_STATFS_BUSY | SM_STATUS_STATFS_WANT);

	if (error) {
		SMBVDEBUG("statfs error=%d\n", error);
		mutex_exit(&smi->smi_lock);
		return (error);
	}

	/*
	 * Set a few things the OTW call didn't get.
	 */
	stvfs.f_frsize = stvfs.f_bsize;
	stvfs.f_favail = stvfs.f_ffree;
	stvfs.f_fsid = (unsigned long)vfsp->vfs_fsid.val[0];
	(void) strncpy(stvfs.f_basetype, vfw.name, FSTYPSZ);
	stvfs.f_flag	= vf_to_stf(vfsp->vfs_flag);
	stvfs.f_namemax	= (uint32_t)MAXNAMELEN - 1;

	/*
	 * Save the result and set its lifetime:
	 * smi_statfstime is an absolute gethrtime() deadline,
	 * SM_MAX_STATFSTIME seconds in the future.
	 */
	now = gethrtime();
	smi->smi_statfstime = now +
	    (SM_MAX_STATFSTIME * (hrtime_t)NANOSEC);
	smi->smi_statvfsbuf = stvfs; /* struct assign! */

	/*
	 * Copy the statvfs data to caller's buf.
	 * Note: struct assignment
	 */
cache_hit:
	*sbp = smi->smi_statvfsbuf;
	mutex_exit(&smi->smi_lock);
	return (error);
}

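/* Serializes the flush done by smbfs_sync() below. */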
static kmutex_t smbfs_syncbusy;

/*
 * Flush dirty smbfs files for file system vfsp.
 * If vfsp == NULL, all smbfs files are flushed.
 */
/*ARGSUSED*/
static int
smbfs_sync(vfs_t *vfsp, short flag, cred_t *cr)
{
	/*
	 * Cross-zone calls are OK here, since this translates to a
	 * VOP_PUTPAGE(B_ASYNC), which gets picked up by the right zone.
	 */
#ifdef APPLE
	if (!(flag & SYNC_ATTR) && mutex_tryenter(&smbfs_syncbusy) != 0) {
		smbfs_rflush(vfsp, cr);
		mutex_exit(&smbfs_syncbusy);
	}
#endif /* APPLE */
	return (0);
}

/*
 * Initialization routine for VFS routines.  Should only be called once
 */
int
smbfs_vfsinit(void)
{
	mutex_init(&smbfs_syncbusy, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

/*
 * Shutdown routine for VFS routines.  Should only be called once
 */
void
smbfs_vfsfini(void)
{
	mutex_destroy(&smbfs_syncbusy);
}

void
smbfs_freevfs(vfs_t *vfsp)
{
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;

	/* free up the resources */
	smi = VFTOSMI(vfsp);

	/*
	 * By this time we should have already deleted the
	 * smi kstats in the unmount code.  If they are still
	 * around, something is wrong.
	 */
	ASSERT(smi->smi_io_kstats == NULL);

	/*
	 * Drop our reference to the share.
	 * This usually leads to VC close.
	 */
	ssp = smi->smi_share;
	smi->smi_share = NULL;
	ssp->ss_mount = NULL;

	smb_share_rele(ssp);

	zone_rele(smi->smi_zone);

	smbfs_free_smi(smi);

	/*
	 * Allow _fini() to succeed now, if so desired.
	 */
	atomic_dec_32(&smbfs_mountcount);
}

/*
 * smbfs_mount_label_policy:
 *	Determine whether the mount is allowed according to MAC check,
 *	by comparing (where appropriate) label of the remote server
 *	against the label of the zone being mounted into.
 *
 *	Returns:
 *		 0 :	access allowed
 *		-1 :	read-only access allowed (i.e., read-down)
 *		>0 :	error code, such as EACCES
 *
 * NB:
 * NFS supports CIPSO labels by parsing the vfs_resource
 * to see what the Solaris server global zone has shared.
 * We can't support that for CIFS since resource names
 * contain share names, not paths.
 */
static int
smbfs_mount_label_policy(vfs_t *vfsp, void *ipaddr, int addr_type, cred_t *cr)
{
	bslabel_t	*server_sl, *mntlabel;
	zone_t		*mntzone = NULL;
	ts_label_t	*zlabel;
	tsol_tpc_t	*tp;
	int		retv;

	/*
	 * Get the zone's label.  Each zone on a labeled system has a label.
	 */
	mntzone = zone_find_by_any_path(refstr_value(vfsp->vfs_mntpt), B_FALSE);
	zlabel = mntzone->zone_slabel;
	ASSERT(zlabel != NULL);
	label_hold(zlabel);

	retv = EACCES;				/* assume the worst */

	/*
	 * Next, get the assigned label of the remote server.
	 */
	tp = find_tpc(ipaddr, addr_type, B_FALSE);
	if (tp == NULL)
		goto out;			/* error getting host entry */

	if (tp->tpc_tp.tp_doi != zlabel->tsl_doi)
		goto rel_tpc;			/* invalid domain */
	if (tp->tpc_tp.host_type != UNLABELED)
		goto rel_tpc;			/* invalid hosttype */

	server_sl = &tp->tpc_tp.tp_def_label;
	mntlabel = label2bslabel(zlabel);

	/*
	 * Now compare labels to complete the MAC check.  If the labels
	 * are equal or if the requestor is in the global zone and has
	 * NET_MAC_AWARE, then allow read-write access.   (Except for
	 * mounts into the global zone itself; restrict these to
	 * read-only.)
	 *
	 * If the requestor is in some other zone, but his label
	 * dominates the server, then allow read-down.
	 *
	 * Otherwise, access is denied.
	 */
	if (blequal(mntlabel, server_sl) ||
	    (crgetzoneid(cr) == GLOBAL_ZONEID &&
	    getpflags(NET_MAC_AWARE, cr) != 0)) {
		if ((mntzone == global_zone) ||
		    !blequal(mntlabel, server_sl))
			retv = -1;		/* read-only */
		else
			retv = 0;		/* access OK */
	} else if (bldominates(mntlabel, server_sl)) {
		retv = -1;			/* read-only */
	} else {
		retv = EACCES;
	}

rel_tpc:
	/*LINTED*/
	TPC_RELE(tp);
out:
	if (mntzone)
		zone_rele(mntzone);
	label_rele(zlabel);
	return (retv);
}