1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $
33  */
34 
35 /*
36  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37  * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
38  */
39 
40 /*
41  * Vnode operations
42  *
43  * This file is similar to nfs3_vnops.c
44  */
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/cred.h>
49 #include <sys/vnode.h>
50 #include <sys/vfs.h>
51 #include <sys/filio.h>
52 #include <sys/uio.h>
53 #include <sys/dirent.h>
54 #include <sys/errno.h>
55 #include <sys/sunddi.h>
56 #include <sys/sysmacros.h>
57 #include <sys/kmem.h>
58 #include <sys/cmn_err.h>
59 #include <sys/vfs_opreg.h>
60 #include <sys/policy.h>
61 #include <sys/sdt.h>
62 #include <sys/taskq_impl.h>
63 #include <sys/zone.h>
64 
65 #ifdef	_KERNEL
66 #include <sys/vmsystm.h>	// for desfree
67 #include <vm/hat.h>
68 #include <vm/as.h>
69 #include <vm/page.h>
70 #include <vm/pvn.h>
71 #include <vm/seg.h>
72 #include <vm/seg_map.h>
73 #include <vm/seg_kpm.h>
74 #include <vm/seg_vn.h>
75 #endif	// _KERNEL
76 
77 #include <netsmb/smb_osdep.h>
78 #include <netsmb/smb.h>
79 #include <netsmb/smb_conn.h>
80 #include <netsmb/smb_subr.h>
81 
82 #include <smbfs/smbfs.h>
83 #include <smbfs/smbfs_node.h>
84 #include <smbfs/smbfs_subr.h>
85 
86 #include <sys/fs/smbfs_ioctl.h>
87 #include <fs/fs_subr.h>
88 
89 #ifndef	MAXOFF32_T
90 #define	MAXOFF32_T	0x7fffffff
91 #endif
92 
93 /*
94  * We assign directory offsets like the NFS client, where the
95  * offset increments by _one_ after each directory entry.
96  * Further, the entries "." and ".." are always at offsets
97  * zero and one (respectively) and the "real" entries from
98  * the server appear at offsets starting with two.  This
99  * macro is used to initialize the n_dirofs field after
100  * setting n_dirseq with a _findopen call.
101  */
102 #define	FIRST_DIROFS	2
103 
104 /*
105  * These characters are illegal in NTFS file names.
106  * ref: http://support.microsoft.com/kb/147438
107  *
108  * Careful!  The check in the XATTR case skips the
109  * first character to allow colon in XATTR names.
110  */
111 static const char illegal_chars[] = {
112 	':',	/* colon - keep this first! */
113 	'\\',	/* back slash */
114 	'/',	/* slash */
115 	'*',	/* asterisk */
116 	'?',	/* question mark */
117 	'"',	/* double quote */
118 	'<',	/* less than sign */
119 	'>',	/* greater than sign */
120 	'|',	/* vertical bar */
121 	0
122 };
123 
124 /*
125  * Turning this on causes nodes to be created in the cache
126  * during directory listings, normally avoiding a second
127  * OtW attribute fetch just after a readdir.
128  */
129 int smbfs_fastlookup = 1;
130 
131 struct vnodeops *smbfs_vnodeops = NULL;
132 
133 /* local static function defines */
134 
135 static int	smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
136 			cred_t *);
137 static int	smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
138 			int cache_ok, caller_context_t *);
139 static int	smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
140 			int flags);
141 static int	smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
142 			char *nnm, struct smb_cred *scred, int flags);
143 static int	smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
144 static int	smbfs_accessx(void *, int, cred_t *);
145 static int	smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
146 			caller_context_t *);
147 static void	smbfs_rele_fid(smbnode_t *, struct smb_cred *);
148 static uint32_t xvattr_to_dosattr(smbnode_t *, struct vattr *);
149 
150 static int	smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
151 
152 static int	smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
153 			caller_context_t *);
154 #ifdef	_KERNEL
155 static int	smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
156 			page_t *[], size_t, struct seg *, caddr_t,
157 			enum seg_rw, cred_t *);
158 static int	smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
159 			int, cred_t *);
160 static void	smbfs_delmap_async(void *);
161 
162 static int	smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
163 			cred_t *);
164 static int	smbfs_bio(struct buf *, int, cred_t *);
165 static int	smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
166 			struct uio *uiop, int pgcreated);
167 #endif	// _KERNEL
168 
169 /*
170  * Error flags used to pass information about certain special errors
171  * which need to be handled specially.
172  */
173 #define	SMBFS_EOF			-98
174 
175 /* When implementing OtW locks, make this a real function. */
176 #define	smbfs_lm_has_sleep(vp) 0
177 
178 /*
179  * These are the vnode ops routines which implement the vnode interface to
180  * the networked file system.  These routines just take their parameters,
181  * make them look networkish by putting the right info into interface structs,
182  * and then calling the appropriate remote routine(s) to do the work.
183  *
184  * Note on directory name lookup cacheing:  If we detect a stale fhandle,
185  * we purge the directory cache relative to that vnode.  This way, the
186  * user won't get burned by the cache repeatedly.  See <smbfs/smbnode.h> for
187  * more details on smbnode locking.
188  */
189 
190 
191 /*
192  * XXX
193  * When new and relevant functionality is enabled, we should be
194  * calling vfs_set_feature() to inform callers that pieces of
195  * functionality are available, per PSARC 2007/227.
196  */
197 /* ARGSUSED */
198 static int
199 smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
200 {
201 	smbnode_t	*np;
202 	vnode_t		*vp;
203 	smbfattr_t	fa;
204 	u_int32_t	rights, rightsrcvd;
205 	u_int16_t	fid, oldfid;
206 	int		oldgenid;
207 	struct smb_cred scred;
208 	smbmntinfo_t	*smi;
209 	smb_share_t	*ssp;
210 	cred_t		*oldcr;
211 	int		tmperror;
212 	int		error = 0;
213 
214 	vp = *vpp;
215 	np = VTOSMB(vp);
216 	smi = VTOSMI(vp);
217 	ssp = smi->smi_share;
218 
219 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
220 		return (EIO);
221 
222 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
223 		return (EIO);
224 
225 	if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
226 		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
227 		return (EACCES);
228 	}
229 
230 	/*
231 	 * Get exclusive access to n_fid and related stuff.
232 	 * No returns after this until out.
233 	 */
234 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
235 		return (EINTR);
236 	smb_credinit(&scred, cr);
237 
238 	/*
239 	 * Keep track of the vnode type at first open.
240 	 * It may change later, and we need close to do
241 	 * cleanup for the type we opened.  Also deny
242 	 * open of new types until old type is closed.
243 	 */
244 	if (np->n_ovtype == VNON) {
245 		ASSERT(np->n_dirrefs == 0);
246 		ASSERT(np->n_fidrefs == 0);
247 	} else if (np->n_ovtype != vp->v_type) {
248 		SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
249 		    np->n_ovtype, vp->v_type);
250 		error = EACCES;
251 		goto out;
252 	}
253 
254 	/*
255 	 * Directory open.  See smbfs_readvdir()
256 	 */
257 	if (vp->v_type == VDIR) {
258 		if (np->n_dirseq == NULL) {
259 			/* first open */
260 			error = smbfs_smb_findopen(np, "*", 1,
261 			    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
262 			    &scred, &np->n_dirseq);
263 			if (error != 0)
264 				goto out;
265 		}
266 		np->n_dirofs = FIRST_DIROFS;
267 		np->n_dirrefs++;
268 		goto have_fid;
269 	}
270 
271 	/*
272 	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
273 	 * FWRITE (to drive successful setattr(size=0) after open)
274 	 */
275 	if (flag & FTRUNC)
276 		flag |= FWRITE;
277 
278 	/*
279 	 * If we already have it open, and the FID is still valid,
280 	 * check whether the rights are sufficient for FID reuse.
281 	 */
282 	if (np->n_fidrefs > 0 &&
283 	    np->n_vcgenid == ssp->ss_vcgenid) {
284 		int upgrade = 0;
285 
286 		if ((flag & FWRITE) &&
287 		    !(np->n_rights & SA_RIGHT_FILE_WRITE_DATA))
288 			upgrade = 1;
289 		if ((flag & FREAD) &&
290 		    !(np->n_rights & SA_RIGHT_FILE_READ_DATA))
291 			upgrade = 1;
292 		if (!upgrade) {
293 			/*
294 			 *  the existing open is good enough
295 			 */
296 			np->n_fidrefs++;
297 			goto have_fid;
298 		}
299 	}
300 	rights = np->n_fidrefs ? np->n_rights : 0;
301 
302 	/*
303 	 * we always ask for READ_CONTROL so we can always get the
304 	 * owner/group IDs to satisfy a stat.  Ditto attributes.
305 	 */
306 	rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
307 	    SA_RIGHT_FILE_READ_ATTRIBUTES);
308 	if ((flag & FREAD))
309 		rights |= SA_RIGHT_FILE_READ_DATA;
310 	if ((flag & FWRITE))
311 		rights |= SA_RIGHT_FILE_WRITE_DATA |
312 		    SA_RIGHT_FILE_APPEND_DATA |
313 		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;
314 
315 	bzero(&fa, sizeof (fa));
316 	error = smbfs_smb_open(np,
317 	    NULL, 0, 0, /* name nmlen xattr */
318 	    rights, &scred,
319 	    &fid, &rightsrcvd, &fa);
320 	if (error)
321 		goto out;
322 	smbfs_attrcache_fa(vp, &fa);
323 
324 	/*
325 	 * We have a new FID and access rights.
326 	 */
327 	oldfid = np->n_fid;
328 	oldgenid = np->n_vcgenid;
329 	np->n_fid = fid;
330 	np->n_vcgenid = ssp->ss_vcgenid;
331 	np->n_rights = rightsrcvd;
332 	np->n_fidrefs++;
333 	if (np->n_fidrefs > 1 &&
334 	    oldgenid == ssp->ss_vcgenid) {
335 		/*
336 		 * We already had it open (presumably because
337 		 * it was open with insufficient rights.)
338 		 * Close old wire-open.
339 		 */
340 		tmperror = smbfs_smb_close(ssp,
341 		    oldfid, NULL, &scred);
342 		if (tmperror)
343 			SMBVDEBUG("error %d closing %s\n",
344 			    tmperror, np->n_rpath);
345 	}
346 
347 	/*
348 	 * This thread did the open.
349 	 * Save our credentials too.
350 	 */
351 	mutex_enter(&np->r_statelock);
352 	oldcr = np->r_cred;
353 	np->r_cred = cr;
354 	crhold(cr);
355 	if (oldcr)
356 		crfree(oldcr);
357 	mutex_exit(&np->r_statelock);
358 
359 have_fid:
360 	/*
361 	 * Keep track of the vnode type at first open.
362 	 * (see comments above)
363 	 */
364 	if (np->n_ovtype == VNON)
365 		np->n_ovtype = vp->v_type;
366 
367 out:
368 	smb_credrele(&scred);
369 	smbfs_rw_exit(&np->r_lkserlock);
370 	return (error);
371 }
372 
373 /*ARGSUSED*/
374 static int
375 smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
376 	caller_context_t *ct)
377 {
378 	smbnode_t	*np;
379 	smbmntinfo_t	*smi;
380 	struct smb_cred scred;
381 	int error = 0;
382 
383 	np = VTOSMB(vp);
384 	smi = VTOSMI(vp);
385 
386 	/*
387 	 * Don't "bail out" for VFS_UNMOUNTED here,
388 	 * as we want to do cleanup, etc.
389 	 */
390 
391 	/*
392 	 * zone_enter(2) prevents processes from changing zones with SMBFS files
393 	 * open; if we happen to get here from the wrong zone we can't do
394 	 * anything over the wire.
395 	 */
396 	if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
397 		/*
398 		 * We could attempt to clean up locks, except we're sure
399 		 * that the current process didn't acquire any locks on
400 		 * the file: any attempt to lock a file belong to another zone
401 		 * will fail, and one can't lock an SMBFS file and then change
402 		 * zones, as that fails too.
403 		 *
404 		 * Returning an error here is the sane thing to do.  A
405 		 * subsequent call to VN_RELE() which translates to a
406 		 * smbfs_inactive() will clean up state: if the zone of the
407 		 * vnode's origin is still alive and kicking, an async worker
408 		 * thread will handle the request (from the correct zone), and
409 		 * everything (minus the final smbfs_getattr_otw() call) should
410 		 * be OK. If the zone is going away smbfs_async_inactive() will
411 		 * throw away cached pages inline.
412 		 */
413 		return (EIO);
414 	}
415 
416 	/*
417 	 * If we are using local locking for this filesystem, then
418 	 * release all of the SYSV style record locks.  Otherwise,
419 	 * we are doing network locking and we need to release all
420 	 * of the network locks.  All of the locks held by this
421 	 * process on this file are released no matter what the
422 	 * incoming reference count is.
423 	 */
424 	if (smi->smi_flags & SMI_LLOCK) {
425 		pid_t pid = ddi_get_pid();
426 		cleanlocks(vp, pid, 0);
427 		cleanshares(vp, pid);
428 	}
429 	/*
430 	 * else doing OtW locking.  SMB servers drop all locks
431 	 * on the file ID we close here, so no _lockrelease()
432 	 */
433 
434 	/*
435 	 * This (passed in) count is the ref. count from the
436 	 * user's file_t before the closef call (fio.c).
437 	 * The rest happens only on last close.
438 	 */
439 	if (count > 1)
440 		return (0);
441 
442 	/* NFS has DNLC purge here. */
443 
444 	/*
445 	 * If the file was open for write and there are pages,
446 	 * then make sure dirty pages written back.
447 	 *
448 	 * NFS does this async when "close-to-open" is off
449 	 * (MI_NOCTO flag is set) to avoid blocking the caller.
450 	 * For now, always do this synchronously (no B_ASYNC).
451 	 */
452 	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
453 		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
454 		if (error == EAGAIN)
455 			error = 0;
456 	}
457 	if (error == 0) {
458 		mutex_enter(&np->r_statelock);
459 		np->r_flags &= ~RSTALE;
460 		np->r_error = 0;
461 		mutex_exit(&np->r_statelock);
462 	}
463 
464 	/*
465 	 * Decrement the reference count for the FID
466 	 * and possibly do the OtW close.
467 	 *
468 	 * Exclusive lock for modifying n_fid stuff.
469 	 * Don't want this one ever interruptible.
470 	 */
471 	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
472 	smb_credinit(&scred, cr);
473 
474 	smbfs_rele_fid(np, &scred);
475 
476 	smb_credrele(&scred);
477 	smbfs_rw_exit(&np->r_lkserlock);
478 
479 	return (0);
480 }
481 
482 /*
483  * Helper for smbfs_close.  Decrement the reference count
484  * for an SMB-level file or directory ID, and when the last
485  * reference for the fid goes away, do the OtW close.
486  * Also called in smbfs_inactive (defensive cleanup).
487  */
488 static void
489 smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
490 {
491 	smb_share_t	*ssp;
492 	cred_t		*oldcr;
493 	struct smbfs_fctx *fctx;
494 	int		error;
495 	uint16_t ofid;
496 
497 	ssp = np->n_mount->smi_share;
498 	error = 0;
499 
500 	/* Make sure we serialize for n_dirseq use. */
501 	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));
502 
503 	/*
504 	 * Note that vp->v_type may change if a remote node
505 	 * is deleted and recreated as a different type, and
506 	 * our getattr may change v_type accordingly.
507 	 * Now use n_ovtype to keep track of the v_type
508 	 * we had during open (see comments above).
509 	 */
510 	switch (np->n_ovtype) {
511 	case VDIR:
512 		ASSERT(np->n_dirrefs > 0);
513 		if (--np->n_dirrefs)
514 			return;
515 		if ((fctx = np->n_dirseq) != NULL) {
516 			np->n_dirseq = NULL;
517 			np->n_dirofs = 0;
518 			error = smbfs_smb_findclose(fctx, scred);
519 		}
520 		break;
521 
522 	case VREG:
523 		ASSERT(np->n_fidrefs > 0);
524 		if (--np->n_fidrefs)
525 			return;
526 		if ((ofid = np->n_fid) != SMB_FID_UNUSED) {
527 			np->n_fid = SMB_FID_UNUSED;
528 			/* After reconnect, n_fid is invalid */
529 			if (np->n_vcgenid == ssp->ss_vcgenid) {
530 				error = smbfs_smb_close(
531 				    ssp, ofid, NULL, scred);
532 			}
533 		}
534 		break;
535 
536 	default:
537 		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
538 		break;
539 	}
540 	if (error) {
541 		SMBVDEBUG("error %d closing %s\n",
542 		    error, np->n_rpath);
543 	}
544 
545 	/* Allow next open to use any v_type. */
546 	np->n_ovtype = VNON;
547 
548 	/*
549 	 * Other "last close" stuff.
550 	 */
551 	mutex_enter(&np->r_statelock);
552 	if (np->n_flag & NATTRCHANGED)
553 		smbfs_attrcache_rm_locked(np);
554 	oldcr = np->r_cred;
555 	np->r_cred = NULL;
556 	mutex_exit(&np->r_statelock);
557 	if (oldcr != NULL)
558 		crfree(oldcr);
559 }
560 
561 /* ARGSUSED */
562 static int
563 smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
564 	caller_context_t *ct)
565 {
566 	struct smb_cred scred;
567 	struct vattr	va;
568 	smbnode_t	*np;
569 	smbmntinfo_t	*smi;
570 	smb_share_t	*ssp;
571 	offset_t	endoff;
572 	ssize_t		past_eof;
573 	int		error;
574 
575 	np = VTOSMB(vp);
576 	smi = VTOSMI(vp);
577 	ssp = smi->smi_share;
578 
579 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
580 		return (EIO);
581 
582 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
583 		return (EIO);
584 
585 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));
586 
587 	if (vp->v_type != VREG)
588 		return (EISDIR);
589 
590 	if (uiop->uio_resid == 0)
591 		return (0);
592 
593 	/*
594 	 * Like NFS3, just check for 63-bit overflow.
595 	 * Our SMB layer takes care to return EFBIG
596 	 * when it has to fallback to a 32-bit call.
597 	 */
598 	endoff = uiop->uio_loffset + uiop->uio_resid;
599 	if (uiop->uio_loffset < 0 || endoff < 0)
600 		return (EINVAL);
601 
602 	/* get vnode attributes from server */
603 	va.va_mask = AT_SIZE | AT_MTIME;
604 	if (error = smbfsgetattr(vp, &va, cr))
605 		return (error);
606 
607 	/* Update mtime with mtime from server here? */
608 
609 	/* if offset is beyond EOF, read nothing */
610 	if (uiop->uio_loffset >= va.va_size)
611 		return (0);
612 
613 	/*
614 	 * Limit the read to the remaining file size.
615 	 * Do this by temporarily reducing uio_resid
616 	 * by the amount the lies beyoned the EOF.
617 	 */
618 	if (endoff > va.va_size) {
619 		past_eof = (ssize_t)(endoff - va.va_size);
620 		uiop->uio_resid -= past_eof;
621 	} else
622 		past_eof = 0;
623 
624 	/*
625 	 * Bypass VM if caching has been disabled (e.g., locking) or if
626 	 * using client-side direct I/O and the file is not mmap'd and
627 	 * there are no cached pages.
628 	 */
629 	if ((vp->v_flag & VNOCACHE) ||
630 	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
631 	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
632 	    !vn_has_cached_data(vp))) {
633 
634 		/* Shared lock for n_fid use in smb_rwuio */
635 		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
636 			return (EINTR);
637 		smb_credinit(&scred, cr);
638 
639 		/* After reconnect, n_fid is invalid */
640 		if (np->n_vcgenid != ssp->ss_vcgenid)
641 			error = ESTALE;
642 		else
643 			error = smb_rwuio(ssp, np->n_fid, UIO_READ,
644 			    uiop, &scred, smb_timo_read);
645 
646 		smb_credrele(&scred);
647 		smbfs_rw_exit(&np->r_lkserlock);
648 
649 		/* undo adjustment of resid */
650 		uiop->uio_resid += past_eof;
651 
652 		return (error);
653 	}
654 
655 #ifdef	_KERNEL
656 	/* (else) Do I/O through segmap. */
657 	do {
658 		caddr_t		base;
659 		u_offset_t	off;
660 		size_t		n;
661 		int		on;
662 		uint_t		flags;
663 
664 		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
665 		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
666 		n = MIN(MAXBSIZE - on, uiop->uio_resid);
667 
668 		error = smbfs_validate_caches(vp, cr);
669 		if (error)
670 			break;
671 
672 		/* NFS waits for RINCACHEPURGE here. */
673 
674 		if (vpm_enable) {
675 			/*
676 			 * Copy data.
677 			 */
678 			error = vpm_data_copy(vp, off + on, n, uiop,
679 			    1, NULL, 0, S_READ);
680 		} else {
681 			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
682 			    S_READ);
683 
684 			error = uiomove(base + on, n, UIO_READ, uiop);
685 		}
686 
687 		if (!error) {
688 			/*
689 			 * If read a whole block or read to eof,
690 			 * won't need this buffer again soon.
691 			 */
692 			mutex_enter(&np->r_statelock);
693 			if (n + on == MAXBSIZE ||
694 			    uiop->uio_loffset == np->r_size)
695 				flags = SM_DONTNEED;
696 			else
697 				flags = 0;
698 			mutex_exit(&np->r_statelock);
699 			if (vpm_enable) {
700 				error = vpm_sync_pages(vp, off, n, flags);
701 			} else {
702 				error = segmap_release(segkmap, base, flags);
703 			}
704 		} else {
705 			if (vpm_enable) {
706 				(void) vpm_sync_pages(vp, off, n, 0);
707 			} else {
708 				(void) segmap_release(segkmap, base, 0);
709 			}
710 		}
711 	} while (!error && uiop->uio_resid > 0);
712 #else	// _KERNEL
713 	error = ENOSYS;
714 #endif	// _KERNEL
715 
716 	/* undo adjustment of resid */
717 	uiop->uio_resid += past_eof;
718 
719 	return (error);
720 }
721 
722 
723 /* ARGSUSED */
724 static int
725 smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
726 	caller_context_t *ct)
727 {
728 	struct smb_cred scred;
729 	struct vattr    va;
730 	smbnode_t	*np;
731 	smbmntinfo_t	*smi;
732 	smb_share_t	*ssp;
733 	offset_t	endoff, limit;
734 	ssize_t		past_limit;
735 	int		error, timo;
736 	u_offset_t	last_off;
737 	size_t		last_resid;
738 #ifdef	_KERNEL
739 	uint_t		bsize;
740 #endif
741 
742 	np = VTOSMB(vp);
743 	smi = VTOSMI(vp);
744 	ssp = smi->smi_share;
745 
746 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
747 		return (EIO);
748 
749 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
750 		return (EIO);
751 
752 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
753 
754 	if (vp->v_type != VREG)
755 		return (EISDIR);
756 
757 	if (uiop->uio_resid == 0)
758 		return (0);
759 
760 	/*
761 	 * Handle ioflag bits: (FAPPEND|FSYNC|FDSYNC)
762 	 */
763 	if (ioflag & (FAPPEND | FSYNC)) {
764 		if (np->n_flag & NMODIFIED) {
765 			smbfs_attrcache_remove(np);
766 		}
767 	}
768 	if (ioflag & FAPPEND) {
769 		/*
770 		 * File size can be changed by another client
771 		 *
772 		 * Todo: Consider redesigning this to use a
773 		 * handle opened for append instead.
774 		 */
775 		va.va_mask = AT_SIZE;
776 		if (error = smbfsgetattr(vp, &va, cr))
777 			return (error);
778 		uiop->uio_loffset = va.va_size;
779 	}
780 
781 	/*
782 	 * Like NFS3, just check for 63-bit overflow.
783 	 */
784 	endoff = uiop->uio_loffset + uiop->uio_resid;
785 	if (uiop->uio_loffset < 0 || endoff < 0)
786 		return (EINVAL);
787 
788 	/*
789 	 * Check to make sure that the process will not exceed
790 	 * its limit on file size.  It is okay to write up to
791 	 * the limit, but not beyond.  Thus, the write which
792 	 * reaches the limit will be short and the next write
793 	 * will return an error.
794 	 *
795 	 * So if we're starting at or beyond the limit, EFBIG.
796 	 * Otherwise, temporarily reduce resid to the amount
797 	 * that is after the limit.
798 	 */
799 	limit = uiop->uio_llimit;
800 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
801 		limit = MAXOFFSET_T;
802 	if (uiop->uio_loffset >= limit) {
803 #ifdef	_KERNEL
804 		proc_t *p = ttoproc(curthread);
805 
806 		mutex_enter(&p->p_lock);
807 		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE],
808 		    p->p_rctls, p, RCA_UNSAFE_SIGINFO);
809 		mutex_exit(&p->p_lock);
810 #endif	// _KERNEL
811 		return (EFBIG);
812 	}
813 	if (endoff > limit) {
814 		past_limit = (ssize_t)(endoff - limit);
815 		uiop->uio_resid -= past_limit;
816 	} else
817 		past_limit = 0;
818 
819 	/*
820 	 * Bypass VM if caching has been disabled (e.g., locking) or if
821 	 * using client-side direct I/O and the file is not mmap'd and
822 	 * there are no cached pages.
823 	 */
824 	if ((vp->v_flag & VNOCACHE) ||
825 	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
826 	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
827 	    !vn_has_cached_data(vp))) {
828 
829 #ifdef	_KERNEL
830 smbfs_fwrite:
831 #endif	// _KERNEL
832 		if (np->r_flags & RSTALE) {
833 			last_resid = uiop->uio_resid;
834 			last_off = uiop->uio_loffset;
835 			error = np->r_error;
836 			/*
837 			 * A close may have cleared r_error, if so,
838 			 * propagate ESTALE error return properly
839 			 */
840 			if (error == 0)
841 				error = ESTALE;
842 			goto bottom;
843 		}
844 
845 		/* Timeout: longer for append. */
846 		timo = smb_timo_write;
847 		if (endoff > np->r_size)
848 			timo = smb_timo_append;
849 
850 		/* Shared lock for n_fid use in smb_rwuio */
851 		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
852 			return (EINTR);
853 		smb_credinit(&scred, cr);
854 
855 		/* After reconnect, n_fid is invalid */
856 		if (np->n_vcgenid != ssp->ss_vcgenid)
857 			error = ESTALE;
858 		else
859 			error = smb_rwuio(ssp, np->n_fid, UIO_WRITE,
860 			    uiop, &scred, timo);
861 
862 		if (error == 0) {
863 			mutex_enter(&np->r_statelock);
864 			np->n_flag |= (NFLUSHWIRE | NATTRCHANGED);
865 			if (uiop->uio_loffset > (offset_t)np->r_size)
866 				np->r_size = (len_t)uiop->uio_loffset;
867 			mutex_exit(&np->r_statelock);
868 			if (ioflag & (FSYNC | FDSYNC)) {
869 				/* Don't error the I/O if this fails. */
870 				(void) smbfs_smb_flush(np, &scred);
871 			}
872 		}
873 
874 		smb_credrele(&scred);
875 		smbfs_rw_exit(&np->r_lkserlock);
876 
877 		/* undo adjustment of resid */
878 		uiop->uio_resid += past_limit;
879 
880 		return (error);
881 	}
882 
883 #ifdef	_KERNEL
884 	/* (else) Do I/O through segmap. */
885 	bsize = vp->v_vfsp->vfs_bsize;
886 
887 	do {
888 		caddr_t		base;
889 		u_offset_t	off;
890 		size_t		n;
891 		int		on;
892 		uint_t		flags;
893 
894 		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
895 		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
896 		n = MIN(MAXBSIZE - on, uiop->uio_resid);
897 
898 		last_resid = uiop->uio_resid;
899 		last_off = uiop->uio_loffset;
900 
901 		if (np->r_flags & RSTALE) {
902 			error = np->r_error;
903 			/*
904 			 * A close may have cleared r_error, if so,
905 			 * propagate ESTALE error return properly
906 			 */
907 			if (error == 0)
908 				error = ESTALE;
909 			break;
910 		}
911 
912 		/*
913 		 * From NFS: Don't create dirty pages faster than they
914 		 * can be cleaned.
915 		 *
916 		 * Here NFS also checks for async writes (np->r_awcount)
917 		 */
918 		mutex_enter(&np->r_statelock);
919 		while (np->r_gcount > 0) {
920 			if (SMBINTR(vp)) {
921 				klwp_t *lwp = ttolwp(curthread);
922 
923 				if (lwp != NULL)
924 					lwp->lwp_nostop++;
925 				if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
926 					mutex_exit(&np->r_statelock);
927 					if (lwp != NULL)
928 						lwp->lwp_nostop--;
929 					error = EINTR;
930 					goto bottom;
931 				}
932 				if (lwp != NULL)
933 					lwp->lwp_nostop--;
934 			} else
935 				cv_wait(&np->r_cv, &np->r_statelock);
936 		}
937 		mutex_exit(&np->r_statelock);
938 
939 		/*
940 		 * Touch the page and fault it in if it is not in core
941 		 * before segmap_getmapflt or vpm_data_copy can lock it.
942 		 * This is to avoid the deadlock if the buffer is mapped
943 		 * to the same file through mmap which we want to write.
944 		 */
945 		uio_prefaultpages((long)n, uiop);
946 
947 		if (vpm_enable) {
948 			/*
949 			 * It will use kpm mappings, so no need to
950 			 * pass an address.
951 			 */
952 			error = smbfs_writenp(np, NULL, n, uiop, 0);
953 		} else {
954 			if (segmap_kpm) {
955 				int pon = uiop->uio_loffset & PAGEOFFSET;
956 				size_t pn = MIN(PAGESIZE - pon,
957 				    uiop->uio_resid);
958 				int pagecreate;
959 
960 				mutex_enter(&np->r_statelock);
961 				pagecreate = (pon == 0) && (pn == PAGESIZE ||
962 				    uiop->uio_loffset + pn >= np->r_size);
963 				mutex_exit(&np->r_statelock);
964 
965 				base = segmap_getmapflt(segkmap, vp, off + on,
966 				    pn, !pagecreate, S_WRITE);
967 
968 				error = smbfs_writenp(np, base + pon, n, uiop,
969 				    pagecreate);
970 
971 			} else {
972 				base = segmap_getmapflt(segkmap, vp, off + on,
973 				    n, 0, S_READ);
974 				error = smbfs_writenp(np, base + on, n, uiop, 0);
975 			}
976 		}
977 
978 		if (!error) {
979 			if (smi->smi_flags & SMI_NOAC)
980 				flags = SM_WRITE;
981 			else if ((uiop->uio_loffset % bsize) == 0 ||
982 			    IS_SWAPVP(vp)) {
983 				/*
984 				 * Have written a whole block.
985 				 * Start an asynchronous write
986 				 * and mark the buffer to
987 				 * indicate that it won't be
988 				 * needed again soon.
989 				 */
990 				flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
991 			} else
992 				flags = 0;
993 			if ((ioflag & (FSYNC|FDSYNC)) ||
994 			    (np->r_flags & ROUTOFSPACE)) {
995 				flags &= ~SM_ASYNC;
996 				flags |= SM_WRITE;
997 			}
998 			if (vpm_enable) {
999 				error = vpm_sync_pages(vp, off, n, flags);
1000 			} else {
1001 				error = segmap_release(segkmap, base, flags);
1002 			}
1003 		} else {
1004 			if (vpm_enable) {
1005 				(void) vpm_sync_pages(vp, off, n, 0);
1006 			} else {
1007 				(void) segmap_release(segkmap, base, 0);
1008 			}
1009 			/*
1010 			 * In the event that we got an access error while
1011 			 * faulting in a page for a write-only file just
1012 			 * force a write.
1013 			 */
1014 			if (error == EACCES)
1015 				goto smbfs_fwrite;
1016 		}
1017 	} while (!error && uiop->uio_resid > 0);
1018 #else	// _KERNEL
1019 	last_resid = uiop->uio_resid;
1020 	last_off = uiop->uio_loffset;
1021 	error = ENOSYS;
1022 #endif	// _KERNEL
1023 
1024 bottom:
1025 	/* undo adjustment of resid */
1026 	if (error) {
1027 		uiop->uio_resid = last_resid + past_limit;
1028 		uiop->uio_loffset = last_off;
1029 	} else {
1030 		uiop->uio_resid += past_limit;
1031 	}
1032 
1033 	return (error);
1034 }
1035 
1036 #ifdef	_KERNEL
1037 
1038 /*
1039  * Like nfs_client.c: writerp()
1040  *
1041  * Write by creating pages and uiomove data onto them.
1042  */
1043 
1044 int
1045 smbfs_writenp(smbnode_t *np, caddr_t base, int tcount, struct uio *uio,
1046     int pgcreated)
1047 {
1048 	int		pagecreate;
1049 	int		n;
1050 	int		saved_n;
1051 	caddr_t		saved_base;
1052 	u_offset_t	offset;
1053 	int		error;
1054 	int		sm_error;
1055 	vnode_t		*vp = SMBTOV(np);
1056 
1057 	ASSERT(tcount <= MAXBSIZE && tcount <= uio->uio_resid);
1058 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
1059 	if (!vpm_enable) {
1060 		ASSERT(((uintptr_t)base & MAXBOFFSET) + tcount <= MAXBSIZE);
1061 	}
1062 
1063 	/*
1064 	 * Move bytes in at most PAGESIZE chunks. We must avoid
1065 	 * spanning pages in uiomove() because page faults may cause
1066 	 * the cache to be invalidated out from under us. The r_size is not
1067 	 * updated until after the uiomove. If we push the last page of a
1068 	 * file before r_size is correct, we will lose the data written past
1069 	 * the current (and invalid) r_size.
1070 	 */
1071 	do {
1072 		offset = uio->uio_loffset;
1073 		pagecreate = 0;
1074 
1075 		/*
1076 		 * n is the number of bytes required to satisfy the request
1077 		 *   or the number of bytes to fill out the page.
1078 		 */
1079 		n = (int)MIN((PAGESIZE - (offset & PAGEOFFSET)), tcount);
1080 
1081 		/*
1082 		 * Check to see if we can skip reading in the page
1083 		 * and just allocate the memory.  We can do this
1084 		 * if we are going to rewrite the entire mapping
1085 		 * or if we are going to write to or beyond the current
1086 		 * end of file from the beginning of the mapping.
1087 		 *
1088 		 * The read of r_size is now protected by r_statelock.
1089 		 */
1090 		mutex_enter(&np->r_statelock);
1091 		/*
1092 		 * When pgcreated is nonzero the caller has already done
1093 		 * a segmap_getmapflt with forcefault 0 and S_WRITE. With
1094 		 * segkpm this means we already have at least one page
1095 		 * created and mapped at base.
1096 		 */
1097 		pagecreate = pgcreated ||
1098 		    ((offset & PAGEOFFSET) == 0 &&
1099 		    (n == PAGESIZE || ((offset + n) >= np->r_size)));
1100 
1101 		mutex_exit(&np->r_statelock);
1102 		if (!vpm_enable && pagecreate) {
1103 			/*
1104 			 * The last argument tells segmap_pagecreate() to
1105 			 * always lock the page, as opposed to sometimes
1106 			 * returning with the page locked. This way we avoid a
1107 			 * fault on the ensuing uiomove(), but also
1108 			 * more importantly (to fix bug 1094402) we can
1109 			 * call segmap_fault() to unlock the page in all
1110 			 * cases. An alternative would be to modify
1111 			 * segmap_pagecreate() to tell us when it is
1112 			 * locking a page, but that's a fairly major
1113 			 * interface change.
1114 			 */
1115 			if (pgcreated == 0)
1116 				(void) segmap_pagecreate(segkmap, base,
1117 				    (uint_t)n, 1);
1118 			saved_base = base;
1119 			saved_n = n;
1120 		}
1121 
1122 		/*
1123 		 * The number of bytes of data in the last page can not
1124 		 * be accurately be determined while page is being
1125 		 * uiomove'd to and the size of the file being updated.
1126 		 * Thus, inform threads which need to know accurately
1127 		 * how much data is in the last page of the file.  They
1128 		 * will not do the i/o immediately, but will arrange for
1129 		 * the i/o to happen later when this modify operation
1130 		 * will have finished.
1131 		 */
1132 		ASSERT(!(np->r_flags & RMODINPROGRESS));
1133 		mutex_enter(&np->r_statelock);
1134 		np->r_flags |= RMODINPROGRESS;
1135 		np->r_modaddr = (offset & MAXBMASK);
1136 		mutex_exit(&np->r_statelock);
1137 
1138 		if (vpm_enable) {
1139 			/*
1140 			 * Copy data. If new pages are created, part of
1141 			 * the page that is not written will be initizliazed
1142 			 * with zeros.
1143 			 */
1144 			error = vpm_data_copy(vp, offset, n, uio,
1145 			    !pagecreate, NULL, 0, S_WRITE);
1146 		} else {
1147 			error = uiomove(base, n, UIO_WRITE, uio);
1148 		}
1149 
1150 		/*
1151 		 * r_size is the maximum number of
1152 		 * bytes known to be in the file.
1153 		 * Make sure it is at least as high as the
1154 		 * first unwritten byte pointed to by uio_loffset.
1155 		 */
1156 		mutex_enter(&np->r_statelock);
1157 		if (np->r_size < uio->uio_loffset)
1158 			np->r_size = uio->uio_loffset;
1159 		np->r_flags &= ~RMODINPROGRESS;
1160 		np->r_flags |= RDIRTY;
1161 		mutex_exit(&np->r_statelock);
1162 
1163 		/* n = # of bytes written */
1164 		n = (int)(uio->uio_loffset - offset);
1165 
1166 		if (!vpm_enable) {
1167 			base += n;
1168 		}
1169 		tcount -= n;
1170 		/*
1171 		 * If we created pages w/o initializing them completely,
1172 		 * we need to zero the part that wasn't set up.
1173 		 * This happens on a most EOF write cases and if
1174 		 * we had some sort of error during the uiomove.
1175 		 */
1176 		if (!vpm_enable && pagecreate) {
1177 			if ((uio->uio_loffset & PAGEOFFSET) || n == 0)
1178 				(void) kzero(base, PAGESIZE - n);
1179 
1180 			if (pgcreated) {
1181 				/*
1182 				 * Caller is responsible for this page,
1183 				 * it was not created in this loop.
1184 				 */
1185 				pgcreated = 0;
1186 			} else {
1187 				/*
1188 				 * For bug 1094402: segmap_pagecreate locks
1189 				 * page. Unlock it. This also unlocks the
1190 				 * pages allocated by page_create_va() in
1191 				 * segmap_pagecreate().
1192 				 */
1193 				sm_error = segmap_fault(kas.a_hat, segkmap,
1194 				    saved_base, saved_n,
1195 				    F_SOFTUNLOCK, S_WRITE);
1196 				if (error == 0)
1197 					error = sm_error;
1198 			}
1199 		}
1200 	} while (tcount > 0 && error == 0);
1201 
1202 	return (error);
1203 }
1204 
1205 /*
1206  * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED}
1207  * Like nfs3_rdwrlbn()
1208  */
1209 static int
1210 smbfs_rdwrlbn(vnode_t *vp, page_t *pp, u_offset_t off, size_t len,
1211 	int flags, cred_t *cr)
1212 {
1213 	smbmntinfo_t	*smi = VTOSMI(vp);
1214 	struct buf *bp;
1215 	int error;
1216 	int sync;
1217 
1218 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1219 		return (EIO);
1220 
1221 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1222 		return (EIO);
1223 
1224 	bp = pageio_setup(pp, len, vp, flags);
1225 	ASSERT(bp != NULL);
1226 
1227 	/*
1228 	 * pageio_setup should have set b_addr to 0.  This
1229 	 * is correct since we want to do I/O on a page
1230 	 * boundary.  bp_mapin will use this addr to calculate
1231 	 * an offset, and then set b_addr to the kernel virtual
1232 	 * address it allocated for us.
1233 	 */
1234 	ASSERT(bp->b_un.b_addr == 0);
1235 
1236 	bp->b_edev = 0;
1237 	bp->b_dev = 0;
1238 	bp->b_lblkno = lbtodb(off);
1239 	bp->b_file = vp;
1240 	bp->b_offset = (offset_t)off;
1241 	bp_mapin(bp);
1242 
1243 	/*
1244 	 * Calculate the desired level of stability to write data.
1245 	 */
1246 	if ((flags & (B_WRITE|B_ASYNC)) == (B_WRITE|B_ASYNC) &&
1247 	    freemem > desfree) {
1248 		sync = 0;
1249 	} else {
1250 		sync = 1;
1251 	}
1252 
1253 	error = smbfs_bio(bp, sync, cr);
1254 
1255 	bp_mapout(bp);
1256 	pageio_done(bp);
1257 
1258 	return (error);
1259 }
1260 
1261 
1262 /*
1263  * Corresponds to nfs3_vnopc.c : nfs3_bio(), though the NFS code
1264  * uses nfs3read()/nfs3write() where we use smb_rwuio().  Also,
1265  * NFS has this later in the file.  Move it up here closer to
1266  * the one call site just above.
1267  */
1268 
1269 static int
1270 smbfs_bio(struct buf *bp, int sync, cred_t *cr)
1271 {
1272 	struct iovec aiov[1];
1273 	struct uio  auio;
1274 	struct smb_cred scred;
1275 	smbnode_t *np = VTOSMB(bp->b_vp);
1276 	smbmntinfo_t *smi = np->n_mount;
1277 	smb_share_t *ssp = smi->smi_share;
1278 	offset_t offset;
1279 	offset_t endoff;
1280 	size_t count;
1281 	size_t past_eof;
1282 	int error;
1283 
1284 	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);
1285 
1286 	offset = ldbtob(bp->b_lblkno);
1287 	count = bp->b_bcount;
1288 	endoff = offset + count;
1289 	if (offset < 0 || endoff < 0)
1290 		return (EINVAL);
1291 
1292 	/*
1293 	 * Limit file I/O to the remaining file size, but see
1294 	 * the notes in smbfs_getpage about SMBFS_EOF.
1295 	 */
1296 	mutex_enter(&np->r_statelock);
1297 	if (offset >= np->r_size) {
1298 		mutex_exit(&np->r_statelock);
1299 		if (bp->b_flags & B_READ) {
1300 			return (SMBFS_EOF);
1301 		} else {
1302 			return (EINVAL);
1303 		}
1304 	}
1305 	if (endoff > np->r_size) {
1306 		past_eof = (size_t)(endoff - np->r_size);
1307 		count -= past_eof;
1308 	} else
1309 		past_eof = 0;
1310 	mutex_exit(&np->r_statelock);
1311 	ASSERT(count > 0);
1312 
1313 	/* Caller did bpmapin().  Mapped address is... */
1314 	aiov[0].iov_base = bp->b_un.b_addr;
1315 	aiov[0].iov_len = count;
1316 	auio.uio_iov = aiov;
1317 	auio.uio_iovcnt = 1;
1318 	auio.uio_loffset = offset;
1319 	auio.uio_segflg = UIO_SYSSPACE;
1320 	auio.uio_fmode = 0;
1321 	auio.uio_resid = count;
1322 
1323 	/* Shared lock for n_fid use in smb_rwuio */
1324 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER,
1325 	    smi->smi_flags & SMI_INT))
1326 		return (EINTR);
1327 	smb_credinit(&scred, cr);
1328 
1329 	DTRACE_IO1(start, struct buf *, bp);
1330 
1331 	if (bp->b_flags & B_READ) {
1332 
1333 		/* After reconnect, n_fid is invalid */
1334 		if (np->n_vcgenid != ssp->ss_vcgenid)
1335 			error = ESTALE;
1336 		else
1337 			error = smb_rwuio(ssp, np->n_fid, UIO_READ,
1338 			    &auio, &scred, smb_timo_read);
1339 
1340 		/* Like NFS, only set b_error here. */
1341 		bp->b_error = error;
1342 		bp->b_resid = auio.uio_resid;
1343 
1344 		if (!error && auio.uio_resid != 0)
1345 			error = EIO;
1346 		if (!error && past_eof != 0) {
1347 			/* Zero the memory beyond EOF. */
1348 			bzero(bp->b_un.b_addr + count, past_eof);
1349 		}
1350 	} else {
1351 
1352 		/* After reconnect, n_fid is invalid */
1353 		if (np->n_vcgenid != ssp->ss_vcgenid)
1354 			error = ESTALE;
1355 		else
1356 			error = smb_rwuio(ssp, np->n_fid, UIO_WRITE,
1357 			    &auio, &scred, smb_timo_write);
1358 
1359 		/* Like NFS, only set b_error here. */
1360 		bp->b_error = error;
1361 		bp->b_resid = auio.uio_resid;
1362 
1363 		if (!error && auio.uio_resid != 0)
1364 			error = EIO;
1365 		if (!error && sync) {
1366 			(void) smbfs_smb_flush(np, &scred);
1367 		}
1368 	}
1369 
1370 	/*
1371 	 * This comes from nfs3_commit()
1372 	 */
1373 	if (error != 0) {
1374 		mutex_enter(&np->r_statelock);
1375 		if (error == ESTALE)
1376 			np->r_flags |= RSTALE;
1377 		if (!np->r_error)
1378 			np->r_error = error;
1379 		mutex_exit(&np->r_statelock);
1380 		bp->b_flags |= B_ERROR;
1381 	}
1382 
1383 	DTRACE_IO1(done, struct buf *, bp);
1384 
1385 	smb_credrele(&scred);
1386 	smbfs_rw_exit(&np->r_lkserlock);
1387 
1388 	if (error == ESTALE)
1389 		smbfs_attrcache_remove(np);
1390 
1391 	return (error);
1392 }
1393 #endif	// _KERNEL
1394 
1395 /*
1396  * Here NFS has: nfs3write, nfs3read
1397  * We use smb_rwuio instead.
1398  */
1399 
1400 /* ARGSUSED */
1401 static int
1402 smbfs_ioctl(vnode_t *vp, int cmd, intptr_t arg, int flag,
1403 	cred_t *cr, int *rvalp,	caller_context_t *ct)
1404 {
1405 	int		error;
1406 	smbmntinfo_t	*smi;
1407 
1408 	smi = VTOSMI(vp);
1409 
1410 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1411 		return (EIO);
1412 
1413 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1414 		return (EIO);
1415 
1416 	switch (cmd) {
1417 
1418 	case _FIOFFS:
1419 		error = smbfs_fsync(vp, 0, cr, ct);
1420 		break;
1421 
1422 		/*
1423 		 * The following two ioctls are used by bfu.
1424 		 * Silently ignore to avoid bfu errors.
1425 		 */
1426 	case _FIOGDIO:
1427 	case _FIOSDIO:
1428 		error = 0;
1429 		break;
1430 
1431 #if 0	/* Todo - SMB ioctl query regions */
1432 	case _FIO_SEEK_DATA:
1433 	case _FIO_SEEK_HOLE:
1434 #endif
1435 
1436 	case _FIODIRECTIO:
1437 		error = smbfs_directio(vp, (int)arg, cr);
1438 		break;
1439 
1440 		/*
1441 		 * Allow get/set with "raw" security descriptor (SD) data.
1442 		 * Useful for testing, diagnosing idmap problems, etc.
1443 		 */
1444 	case SMBFSIO_GETSD:
1445 		error = smbfs_acl_iocget(vp, arg, flag, cr);
1446 		break;
1447 
1448 	case SMBFSIO_SETSD:
1449 		error = smbfs_acl_iocset(vp, arg, flag, cr);
1450 		break;
1451 
1452 	default:
1453 		error = ENOTTY;
1454 		break;
1455 	}
1456 
1457 	return (error);
1458 }
1459 
1460 
1461 /*
1462  * Return either cached or remote attributes. If get remote attr
1463  * use them to check and invalidate caches, then cache the new attributes.
1464  */
1465 /* ARGSUSED */
1466 static int
1467 smbfs_getattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1468 	caller_context_t *ct)
1469 {
1470 	smbnode_t *np;
1471 	smbmntinfo_t *smi;
1472 	int error;
1473 
1474 	smi = VTOSMI(vp);
1475 
1476 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1477 		return (EIO);
1478 
1479 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1480 		return (EIO);
1481 
1482 	/*
1483 	 * If it has been specified that the return value will
1484 	 * just be used as a hint, and we are only being asked
1485 	 * for size, fsid or rdevid, then return the client's
1486 	 * notion of these values without checking to make sure
1487 	 * that the attribute cache is up to date.
1488 	 * The whole point is to avoid an over the wire GETATTR
1489 	 * call.
1490 	 */
1491 	np = VTOSMB(vp);
1492 	if (flags & ATTR_HINT) {
1493 		if (vap->va_mask ==
1494 		    (vap->va_mask & (AT_SIZE | AT_FSID | AT_RDEV))) {
1495 			mutex_enter(&np->r_statelock);
1496 			if (vap->va_mask | AT_SIZE)
1497 				vap->va_size = np->r_size;
1498 			if (vap->va_mask | AT_FSID)
1499 				vap->va_fsid = vp->v_vfsp->vfs_dev;
1500 			if (vap->va_mask | AT_RDEV)
1501 				vap->va_rdev = vp->v_rdev;
1502 			mutex_exit(&np->r_statelock);
1503 			return (0);
1504 		}
1505 	}
1506 
1507 	/*
1508 	 * Only need to flush pages if asking for the mtime
1509 	 * and if there any dirty pages.
1510 	 *
1511 	 * Here NFS also checks for async writes (np->r_awcount)
1512 	 */
1513 	if (vap->va_mask & AT_MTIME) {
1514 		if (vn_has_cached_data(vp) &&
1515 		    ((np->r_flags & RDIRTY) != 0)) {
1516 			mutex_enter(&np->r_statelock);
1517 			np->r_gcount++;
1518 			mutex_exit(&np->r_statelock);
1519 			error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
1520 			mutex_enter(&np->r_statelock);
1521 			if (error && (error == ENOSPC || error == EDQUOT)) {
1522 				if (!np->r_error)
1523 					np->r_error = error;
1524 			}
1525 			if (--np->r_gcount == 0)
1526 				cv_broadcast(&np->r_cv);
1527 			mutex_exit(&np->r_statelock);
1528 		}
1529 	}
1530 
1531 	return (smbfsgetattr(vp, vap, cr));
1532 }
1533 
1534 /* smbfsgetattr() in smbfs_client.c */
1535 
1536 /*ARGSUSED4*/
1537 static int
1538 smbfs_setattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr,
1539 		caller_context_t *ct)
1540 {
1541 	vfs_t		*vfsp;
1542 	smbmntinfo_t	*smi;
1543 	int		error;
1544 	uint_t		mask;
1545 	struct vattr	oldva;
1546 
1547 	vfsp = vp->v_vfsp;
1548 	smi = VFTOSMI(vfsp);
1549 
1550 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1551 		return (EIO);
1552 
1553 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
1554 		return (EIO);
1555 
1556 	mask = vap->va_mask;
1557 	if (mask & AT_NOSET)
1558 		return (EINVAL);
1559 
1560 	if (vfsp->vfs_flag & VFS_RDONLY)
1561 		return (EROFS);
1562 
1563 	/*
1564 	 * This is a _local_ access check so that only the owner of
1565 	 * this mount can set attributes.  With ACLs enabled, the
1566 	 * file owner can be different from the mount owner, and we
1567 	 * need to check the _mount_ owner here.  See _access_rwx
1568 	 */
1569 	bzero(&oldva, sizeof (oldva));
1570 	oldva.va_mask = AT_TYPE | AT_MODE;
1571 	error = smbfsgetattr(vp, &oldva, cr);
1572 	if (error)
1573 		return (error);
1574 	oldva.va_mask |= AT_UID | AT_GID;
1575 	oldva.va_uid = smi->smi_uid;
1576 	oldva.va_gid = smi->smi_gid;
1577 
1578 	error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
1579 	    smbfs_accessx, vp);
1580 	if (error)
1581 		return (error);
1582 
1583 	if (mask & (AT_UID | AT_GID)) {
1584 		if (smi->smi_flags & SMI_ACL)
1585 			error = smbfs_acl_setids(vp, vap, cr);
1586 		else
1587 			error = ENOSYS;
1588 		if (error != 0) {
1589 			SMBVDEBUG("error %d seting UID/GID on %s",
1590 			    error, VTOSMB(vp)->n_rpath);
1591 			/*
1592 			 * It might be more correct to return the
1593 			 * error here, but that causes complaints
1594 			 * when root extracts a cpio archive, etc.
1595 			 * So ignore this error, and go ahead with
1596 			 * the rest of the setattr work.
1597 			 */
1598 		}
1599 	}
1600 
1601 	error = smbfssetattr(vp, vap, flags, cr);
1602 
1603 #ifdef	SMBFS_VNEVENT
1604 	if (error == 0 && (vap->va_mask & AT_SIZE) && vap->va_size == 0)
1605 		vnevent_truncate(vp, ct);
1606 #endif
1607 
1608 	return (error);
1609 }
1610 
1611 /*
1612  * Mostly from Darwin smbfs_setattr()
1613  * but then modified a lot.
1614  */
1615 /* ARGSUSED */
1616 static int
1617 smbfssetattr(vnode_t *vp, struct vattr *vap, int flags, cred_t *cr)
1618 {
1619 	int		error = 0;
1620 	smbnode_t	*np = VTOSMB(vp);
1621 	uint_t		mask = vap->va_mask;
1622 	struct timespec	*mtime, *atime;
1623 	struct smb_cred	scred;
1624 	int		cerror, modified = 0;
1625 	unsigned short	fid;
1626 	int have_fid = 0;
1627 	uint32_t rights = 0;
1628 	uint32_t dosattr = 0;
1629 
1630 	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
1631 
1632 	/*
1633 	 * There are no settable attributes on the XATTR dir,
1634 	 * so just silently ignore these.  On XATTR files,
1635 	 * you can set the size but nothing else.
1636 	 */
1637 	if (vp->v_flag & V_XATTRDIR)
1638 		return (0);
1639 	if (np->n_flag & N_XATTR) {
1640 		if (mask & AT_TIMES)
1641 			SMBVDEBUG("ignore set time on xattr\n");
1642 		mask &= AT_SIZE;
1643 	}
1644 
1645 	/*
1646 	 * Only need to flush pages if there are any pages and
1647 	 * if the file is marked as dirty in some fashion.  The
1648 	 * file must be flushed so that we can accurately
1649 	 * determine the size of the file and the cached data
1650 	 * after the SETATTR returns.  A file is considered to
1651 	 * be dirty if it is either marked with RDIRTY, has
1652 	 * outstanding i/o's active, or is mmap'd.  In this
1653 	 * last case, we can't tell whether there are dirty
1654 	 * pages, so we flush just to be sure.
1655 	 */
1656 	if (vn_has_cached_data(vp) &&
1657 	    ((np->r_flags & RDIRTY) ||
1658 	    np->r_count > 0 ||
1659 	    np->r_mapcnt > 0)) {
1660 		ASSERT(vp->v_type != VCHR);
1661 		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, NULL);
1662 		if (error && (error == ENOSPC || error == EDQUOT)) {
1663 			mutex_enter(&np->r_statelock);
1664 			if (!np->r_error)
1665 				np->r_error = error;
1666 			mutex_exit(&np->r_statelock);
1667 		}
1668 	}
1669 
1670 	/*
1671 	 * If our caller is trying to set multiple attributes, they
1672 	 * can make no assumption about what order they are done in.
1673 	 * Here we try to do them in order of decreasing likelihood
1674 	 * of failure, just to minimize the chance we'll wind up
1675 	 * with a partially complete request.
1676 	 */
1677 
1678 	/* Shared lock for (possible) n_fid use. */
1679 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
1680 		return (EINTR);
1681 	smb_credinit(&scred, cr);
1682 
1683 	/*
1684 	 * If the caller has provided extensible attributes,
1685 	 * map those into DOS attributes supported by SMB.
1686 	 * Note: zero means "no change".
1687 	 */
1688 	if (mask & AT_XVATTR)
1689 		dosattr = xvattr_to_dosattr(np, vap);
1690 
1691 	/*
1692 	 * Will we need an open handle for this setattr?
1693 	 * If so, what rights will we need?
1694 	 */
1695 	if (dosattr || (mask & (AT_ATIME | AT_MTIME))) {
1696 		rights |=
1697 		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;
1698 	}
1699 	if (mask & AT_SIZE) {
1700 		rights |=
1701 		    SA_RIGHT_FILE_WRITE_DATA |
1702 		    SA_RIGHT_FILE_APPEND_DATA;
1703 	}
1704 
1705 	/*
1706 	 * Only SIZE really requires a handle, but it's
1707 	 * simpler and more reliable to set via a handle.
1708 	 * Some servers like NT4 won't set times by path.
1709 	 * Also, we're usually setting everything anyway.
1710 	 */
1711 	if (rights != 0) {
1712 		error = smbfs_smb_tmpopen(np, rights, &scred, &fid);
1713 		if (error) {
1714 			SMBVDEBUG("error %d opening %s\n",
1715 			    error, np->n_rpath);
1716 			goto out;
1717 		}
1718 		have_fid = 1;
1719 	}
1720 
1721 	/*
1722 	 * If the server supports the UNIX extensions, right here is where
1723 	 * we'd support changes to uid, gid, mode, and possibly va_flags.
1724 	 * For now we claim to have made any such changes.
1725 	 */
1726 
1727 	if (mask & AT_SIZE) {
1728 		/*
1729 		 * If the new file size is less than what the client sees as
1730 		 * the file size, then just change the size and invalidate
1731 		 * the pages.
1732 		 */
1733 
1734 		/*
1735 		 * Set the file size to vap->va_size.
1736 		 */
1737 		ASSERT(have_fid);
1738 		error = smbfs_smb_setfsize(np, fid, vap->va_size, &scred);
1739 		if (error) {
1740 			SMBVDEBUG("setsize error %d file %s\n",
1741 			    error, np->n_rpath);
1742 		} else {
1743 			/*
1744 			 * Darwin had code here to zero-extend.
1745 			 * Tests indicate the server will zero-fill,
1746 			 * so looks like we don't need to do that.
1747 			 */
1748 			mutex_enter(&np->r_statelock);
1749 			np->r_size = vap->va_size;
1750 			mutex_exit(&np->r_statelock);
1751 			modified = 1;
1752 		}
1753 	}
1754 
1755 	/*
1756 	 * Todo: Implement setting create_time (which is
1757 	 * different from ctime).
1758 	 */
1759 	mtime = ((mask & AT_MTIME) ? &vap->va_mtime : 0);
1760 	atime = ((mask & AT_ATIME) ? &vap->va_atime : 0);
1761 
1762 	if (dosattr || mtime || atime) {
1763 		/*
1764 		 * Always use the handle-based set attr call now.
1765 		 */
1766 		ASSERT(have_fid);
1767 		error = smbfs_smb_setfattr(np, fid,
1768 		    dosattr, mtime, atime, &scred);
1769 		if (error) {
1770 			SMBVDEBUG("set times error %d file %s\n",
1771 			    error, np->n_rpath);
1772 		} else {
1773 			modified = 1;
1774 		}
1775 	}
1776 
1777 out:
1778 	if (have_fid) {
1779 		cerror = smbfs_smb_tmpclose(np, fid, &scred);
1780 		if (cerror)
1781 			SMBVDEBUG("error %d closing %s\n",
1782 			    cerror, np->n_rpath);
1783 	}
1784 
1785 	smb_credrele(&scred);
1786 	smbfs_rw_exit(&np->r_lkserlock);
1787 
1788 	if (modified) {
1789 		/*
1790 		 * Invalidate attribute cache in case the server
1791 		 * doesn't set exactly the attributes we asked.
1792 		 */
1793 		smbfs_attrcache_remove(np);
1794 
1795 		/*
1796 		 * If changing the size of the file, invalidate
1797 		 * any local cached data which is no longer part
1798 		 * of the file.  We also possibly invalidate the
1799 		 * last page in the file.  We could use
1800 		 * pvn_vpzero(), but this would mark the page as
1801 		 * modified and require it to be written back to
1802 		 * the server for no particularly good reason.
1803 		 * This way, if we access it, then we bring it
1804 		 * back in.  A read should be cheaper than a
1805 		 * write.
1806 		 */
1807 		if (mask & AT_SIZE) {
1808 			smbfs_invalidate_pages(vp,
1809 			    (vap->va_size & PAGEMASK), cr);
1810 		}
1811 	}
1812 
1813 	return (error);
1814 }
1815 
1816 /*
1817  * Helper function for extensible system attributes (PSARC 2007/315)
1818  * Compute the DOS attribute word to pass to _setfattr (see above).
1819  * This returns zero IFF no change is being made to attributes.
1820  * Otherwise return the new attributes or SMB_EFA_NORMAL.
1821  */
1822 static uint32_t
1823 xvattr_to_dosattr(smbnode_t *np, struct vattr *vap)
1824 {
1825 	xvattr_t *xvap = (xvattr_t *)vap;
1826 	xoptattr_t *xoap = NULL;
1827 	uint32_t attr = np->r_attr.fa_attr;
1828 	boolean_t anyset = B_FALSE;
1829 
1830 	if ((xoap = xva_getxoptattr(xvap)) == NULL)
1831 		return (0);
1832 
1833 	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
1834 		if (xoap->xoa_archive)
1835 			attr |= SMB_FA_ARCHIVE;
1836 		else
1837 			attr &= ~SMB_FA_ARCHIVE;
1838 		XVA_SET_RTN(xvap, XAT_ARCHIVE);
1839 		anyset = B_TRUE;
1840 	}
1841 	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
1842 		if (xoap->xoa_system)
1843 			attr |= SMB_FA_SYSTEM;
1844 		else
1845 			attr &= ~SMB_FA_SYSTEM;
1846 		XVA_SET_RTN(xvap, XAT_SYSTEM);
1847 		anyset = B_TRUE;
1848 	}
1849 	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
1850 		if (xoap->xoa_readonly)
1851 			attr |= SMB_FA_RDONLY;
1852 		else
1853 			attr &= ~SMB_FA_RDONLY;
1854 		XVA_SET_RTN(xvap, XAT_READONLY);
1855 		anyset = B_TRUE;
1856 	}
1857 	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
1858 		if (xoap->xoa_hidden)
1859 			attr |= SMB_FA_HIDDEN;
1860 		else
1861 			attr &= ~SMB_FA_HIDDEN;
1862 		XVA_SET_RTN(xvap, XAT_HIDDEN);
1863 		anyset = B_TRUE;
1864 	}
1865 
1866 	if (anyset == B_FALSE)
1867 		return (0);	/* no change */
1868 	if (attr == 0)
1869 		attr = SMB_EFA_NORMAL;
1870 
1871 	return (attr);
1872 }
1873 
1874 /*
1875  * smbfs_access_rwx()
1876  * Common function for smbfs_access, etc.
1877  *
1878  * The security model implemented by the FS is unusual
1879  * due to the current "single user mounts" restriction:
1880  * All access under a given mount point uses the CIFS
1881  * credentials established by the owner of the mount.
1882  *
1883  * Most access checking is handled by the CIFS server,
1884  * but we need sufficient Unix access checks here to
1885  * prevent other local Unix users from having access
1886  * to objects under this mount that the uid/gid/mode
1887  * settings in the mount would not allow.
1888  *
1889  * With this model, there is a case where we need the
1890  * ability to do an access check before we have the
1891  * vnode for an object.  This function takes advantage
1892  * of the fact that the uid/gid/mode is per mount, and
1893  * avoids the need for a vnode.
1894  *
1895  * We still (sort of) need a vnode when we call
1896  * secpolicy_vnode_access, but that only uses
1897  * the vtype field, so we can use a pair of fake
1898  * vnodes that have only v_type filled in.
1899  */
1900 static int
1901 smbfs_access_rwx(vfs_t *vfsp, int vtype, int mode, cred_t *cr)
1902 {
1903 	/* See the secpolicy call below. */
1904 	static const vnode_t tmpl_vdir = { .v_type = VDIR };
1905 	static const vnode_t tmpl_vreg = { .v_type = VREG };
1906 	vattr_t		va;
1907 	vnode_t		*tvp;
1908 	struct smbmntinfo *smi = VFTOSMI(vfsp);
1909 	int shift = 0;
1910 
1911 	/*
1912 	 * Build our (fabricated) vnode attributes.
1913 	 */
1914 	bzero(&va, sizeof (va));
1915 	va.va_mask = AT_TYPE | AT_MODE | AT_UID | AT_GID;
1916 	va.va_type = vtype;
1917 	va.va_mode = (vtype == VDIR) ?
1918 	    smi->smi_dmode : smi->smi_fmode;
1919 	va.va_uid = smi->smi_uid;
1920 	va.va_gid = smi->smi_gid;
1921 
1922 	/*
1923 	 * Disallow write attempts on read-only file systems,
1924 	 * unless the file is a device or fifo node.  Note:
1925 	 * Inline vn_is_readonly and IS_DEVVP here because
1926 	 * we may not have a vnode ptr.  Original expr. was:
1927 	 * (mode & VWRITE) && vn_is_readonly(vp) && !IS_DEVVP(vp))
1928 	 */
1929 	if ((mode & VWRITE) &&
1930 	    (vfsp->vfs_flag & VFS_RDONLY) &&
1931 	    !(vtype == VCHR || vtype == VBLK || vtype == VFIFO))
1932 		return (EROFS);
1933 
1934 	/*
1935 	 * Disallow attempts to access mandatory lock files.
1936 	 * Similarly, expand MANDLOCK here.
1937 	 */
1938 	if ((mode & (VWRITE | VREAD | VEXEC)) &&
1939 	    va.va_type == VREG && MANDMODE(va.va_mode))
1940 		return (EACCES);
1941 
1942 	/*
1943 	 * Access check is based on only
1944 	 * one of owner, group, public.
1945 	 * If not owner, then check group.
1946 	 * If not a member of the group,
1947 	 * then check public access.
1948 	 */
1949 	if (crgetuid(cr) != va.va_uid) {
1950 		shift += 3;
1951 		if (!groupmember(va.va_gid, cr))
1952 			shift += 3;
1953 	}
1954 
1955 	/*
1956 	 * We need a vnode for secpolicy_vnode_access,
1957 	 * but the only thing it looks at is v_type,
1958 	 * so pass one of the templates above.
1959 	 */
1960 	tvp = (va.va_type == VDIR) ?
1961 	    (vnode_t *)&tmpl_vdir :
1962 	    (vnode_t *)&tmpl_vreg;
1963 
1964 	return (secpolicy_vnode_access2(cr, tvp, va.va_uid,
1965 	    va.va_mode << shift, mode));
1966 }
1967 
1968 /*
1969  * See smbfs_setattr
1970  */
1971 static int
1972 smbfs_accessx(void *arg, int mode, cred_t *cr)
1973 {
1974 	vnode_t *vp = arg;
1975 	/*
1976 	 * Note: The caller has checked the current zone,
1977 	 * the SMI_DEAD and VFS_UNMOUNTED flags, etc.
1978 	 */
1979 	return (smbfs_access_rwx(vp->v_vfsp, vp->v_type, mode, cr));
1980 }
1981 
1982 /*
1983  * XXX
1984  * This op should support PSARC 2007/403, Modified Access Checks for CIFS
1985  */
1986 /* ARGSUSED */
1987 static int
1988 smbfs_access(vnode_t *vp, int mode, int flags, cred_t *cr, caller_context_t *ct)
1989 {
1990 	vfs_t		*vfsp;
1991 	smbmntinfo_t	*smi;
1992 
1993 	vfsp = vp->v_vfsp;
1994 	smi = VFTOSMI(vfsp);
1995 
1996 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
1997 		return (EIO);
1998 
1999 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
2000 		return (EIO);
2001 
2002 	return (smbfs_access_rwx(vfsp, vp->v_type, mode, cr));
2003 }
2004 
2005 
2006 /* ARGSUSED */
2007 static int
2008 smbfs_readlink(vnode_t *vp, struct uio *uiop, cred_t *cr, caller_context_t *ct)
2009 {
2010 	/* Not yet... */
2011 	return (ENOSYS);
2012 }
2013 
2014 
2015 /*
2016  * Flush local dirty pages to stable storage on the server.
2017  *
2018  * If FNODSYNC is specified, then there is nothing to do because
2019  * metadata changes are not cached on the client before being
2020  * sent to the server.
2021  */
2022 /* ARGSUSED */
2023 static int
2024 smbfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct)
2025 {
2026 	int		error = 0;
2027 	smbmntinfo_t	*smi;
2028 	smbnode_t	*np;
2029 	struct smb_cred scred;
2030 
2031 	np = VTOSMB(vp);
2032 	smi = VTOSMI(vp);
2033 
2034 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2035 		return (EIO);
2036 
2037 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2038 		return (EIO);
2039 
2040 	if ((syncflag & FNODSYNC) || IS_SWAPVP(vp))
2041 		return (0);
2042 
2043 	if ((syncflag & (FSYNC|FDSYNC)) == 0)
2044 		return (0);
2045 
2046 	error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
2047 	if (error)
2048 		return (error);
2049 
2050 	/* Shared lock for n_fid use in _flush */
2051 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
2052 		return (EINTR);
2053 	smb_credinit(&scred, cr);
2054 
2055 	error = smbfs_smb_flush(np, &scred);
2056 
2057 	smb_credrele(&scred);
2058 	smbfs_rw_exit(&np->r_lkserlock);
2059 
2060 	return (error);
2061 }
2062 
2063 /*
2064  * Last reference to vnode went away.
2065  */
2066 /* ARGSUSED */
2067 static void
2068 smbfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
2069 {
2070 	struct smb_cred scred;
2071 	smbnode_t	*np = VTOSMB(vp);
2072 	int error;
2073 
2074 	/*
2075 	 * Don't "bail out" for VFS_UNMOUNTED here,
2076 	 * as we want to do cleanup, etc.
2077 	 * See also pcfs_inactive
2078 	 */
2079 
2080 	/*
2081 	 * If this is coming from the wrong zone, we let someone in the right
2082 	 * zone take care of it asynchronously.  We can get here due to
2083 	 * VN_RELE() being called from pageout() or fsflush().  This call may
2084 	 * potentially turn into an expensive no-op if, for instance, v_count
2085 	 * gets incremented in the meantime, but it's still correct.
2086 	 */
2087 
2088 	/*
2089 	 * From NFS:rinactive()
2090 	 *
2091 	 * Before freeing anything, wait until all asynchronous
2092 	 * activity is done on this rnode.  This will allow all
2093 	 * asynchronous read ahead and write behind i/o's to
2094 	 * finish.
2095 	 */
2096 	mutex_enter(&np->r_statelock);
2097 	while (np->r_count > 0)
2098 		cv_wait(&np->r_cv, &np->r_statelock);
2099 	mutex_exit(&np->r_statelock);
2100 
2101 	/*
2102 	 * Flush and invalidate all pages associated with the vnode.
2103 	 */
2104 	if (vn_has_cached_data(vp)) {
2105 		if ((np->r_flags & RDIRTY) && !np->r_error) {
2106 			error = smbfs_putpage(vp, (u_offset_t)0, 0, 0, cr, ct);
2107 			if (error && (error == ENOSPC || error == EDQUOT)) {
2108 				mutex_enter(&np->r_statelock);
2109 				if (!np->r_error)
2110 					np->r_error = error;
2111 				mutex_exit(&np->r_statelock);
2112 			}
2113 		}
2114 		smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
2115 	}
2116 	/*
2117 	 * This vnode should have lost all cached data.
2118 	 */
2119 	ASSERT(vn_has_cached_data(vp) == 0);
2120 
2121 	/*
2122 	 * Defend against the possibility that higher-level callers
2123 	 * might not correctly balance open and close calls.  If we
2124 	 * get here with open references remaining, it means there
2125 	 * was a missing VOP_CLOSE somewhere.  If that happens, do
2126 	 * the close here so we don't "leak" FIDs on the server.
2127 	 *
2128 	 * Exclusive lock for modifying n_fid stuff.
2129 	 * Don't want this one ever interruptible.
2130 	 */
2131 	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
2132 	smb_credinit(&scred, cr);
2133 
2134 	switch (np->n_ovtype) {
2135 	case VNON:
2136 		/* not open (OK) */
2137 		break;
2138 
2139 	case VDIR:
2140 		if (np->n_dirrefs == 0)
2141 			break;
2142 		SMBVDEBUG("open dir: refs %d path %s\n",
2143 		    np->n_dirrefs, np->n_rpath);
2144 		/* Force last close. */
2145 		np->n_dirrefs = 1;
2146 		smbfs_rele_fid(np, &scred);
2147 		break;
2148 
2149 	case VREG:
2150 		if (np->n_fidrefs == 0)
2151 			break;
2152 		SMBVDEBUG("open file: refs %d id 0x%x path %s\n",
2153 		    np->n_fidrefs, np->n_fid, np->n_rpath);
2154 		/* Force last close. */
2155 		np->n_fidrefs = 1;
2156 		smbfs_rele_fid(np, &scred);
2157 		break;
2158 
2159 	default:
2160 		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
2161 		np->n_ovtype = VNON;
2162 		break;
2163 	}
2164 
2165 	smb_credrele(&scred);
2166 	smbfs_rw_exit(&np->r_lkserlock);
2167 
2168 	/*
2169 	 * XATTR directories (and the files under them) have
2170 	 * little value for reclaim, so just remove them from
2171 	 * the "hash" (AVL) as soon as they go inactive.
2172 	 * Note that the node may already have been removed
2173 	 * from the hash by smbfsremove.
2174 	 */
2175 	if ((np->n_flag & N_XATTR) != 0 &&
2176 	    (np->r_flags & RHASHED) != 0)
2177 		smbfs_rmhash(np);
2178 
2179 	smbfs_addfree(np);
2180 }
2181 
2182 /*
2183  * Remote file system operations having to do with directory manipulation.
2184  */
2185 /* ARGSUSED */
2186 static int
2187 smbfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp,
2188 	int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
2189 	int *direntflags, pathname_t *realpnp)
2190 {
2191 	vfs_t		*vfs;
2192 	smbmntinfo_t	*smi;
2193 	smbnode_t	*dnp;
2194 	int		error;
2195 
2196 	vfs = dvp->v_vfsp;
2197 	smi = VFTOSMI(vfs);
2198 
2199 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2200 		return (EPERM);
2201 
2202 	if (smi->smi_flags & SMI_DEAD || vfs->vfs_flag & VFS_UNMOUNTED)
2203 		return (EIO);
2204 
2205 	dnp = VTOSMB(dvp);
2206 
2207 	/*
2208 	 * Are we looking up extended attributes?  If so, "dvp" is
2209 	 * the file or directory for which we want attributes, and
2210 	 * we need a lookup of the (faked up) attribute directory
2211 	 * before we lookup the rest of the path.
2212 	 */
2213 	if (flags & LOOKUP_XATTR) {
2214 		/*
2215 		 * Require the xattr mount option.
2216 		 */
2217 		if ((vfs->vfs_flag & VFS_XATTR) == 0)
2218 			return (EINVAL);
2219 
2220 		error = smbfs_get_xattrdir(dvp, vpp, cr, flags);
2221 		return (error);
2222 	}
2223 
2224 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_READER, SMBINTR(dvp)))
2225 		return (EINTR);
2226 
2227 	error = smbfslookup(dvp, nm, vpp, cr, 1, ct);
2228 
2229 	smbfs_rw_exit(&dnp->r_rwlock);
2230 
2231 	return (error);
2232 }
2233 
2234 /* ARGSUSED */
2235 static int
2236 smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
2237 	int cache_ok, caller_context_t *ct)
2238 {
2239 	int		error;
2240 	int		supplen; /* supported length */
2241 	vnode_t		*vp;
2242 	smbnode_t	*np;
2243 	smbnode_t	*dnp;
2244 	smbmntinfo_t	*smi;
2245 	/* struct smb_vc	*vcp; */
2246 	const char	*ill;
2247 	const char	*name = (const char *)nm;
2248 	int		nmlen = strlen(nm);
2249 	int		rplen;
2250 	struct smb_cred scred;
2251 	struct smbfattr fa;
2252 
2253 	smi = VTOSMI(dvp);
2254 	dnp = VTOSMB(dvp);
2255 
2256 	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);
2257 
2258 #ifdef NOT_YET
2259 	vcp = SSTOVC(smi->smi_share);
2260 
2261 	/* XXX: Should compute this once and store it in smbmntinfo_t */
2262 	supplen = (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN2_0) ? 255 : 12;
2263 #else
2264 	supplen = 255;
2265 #endif
2266 
2267 	/*
2268 	 * RWlock must be held, either reader or writer.
2269 	 */
2270 	ASSERT(dnp->r_rwlock.count != 0);
2271 
2272 	/*
2273 	 * If lookup is for "", just return dvp.
2274 	 * No need to perform any access checks.
2275 	 */
2276 	if (nmlen == 0) {
2277 		VN_HOLD(dvp);
2278 		*vpp = dvp;
2279 		return (0);
2280 	}
2281 
2282 	/*
2283 	 * Can't do lookups in non-directories.
2284 	 */
2285 	if (dvp->v_type != VDIR)
2286 		return (ENOTDIR);
2287 
2288 	/*
2289 	 * Need search permission in the directory.
2290 	 */
2291 	error = smbfs_access(dvp, VEXEC, 0, cr, ct);
2292 	if (error)
2293 		return (error);
2294 
2295 	/*
2296 	 * If lookup is for ".", just return dvp.
2297 	 * Access check was done above.
2298 	 */
2299 	if (nmlen == 1 && name[0] == '.') {
2300 		VN_HOLD(dvp);
2301 		*vpp = dvp;
2302 		return (0);
2303 	}
2304 
2305 	/*
2306 	 * Now some sanity checks on the name.
2307 	 * First check the length.
2308 	 */
2309 	if (nmlen > supplen)
2310 		return (ENAMETOOLONG);
2311 
2312 	/*
2313 	 * Avoid surprises with characters that are
2314 	 * illegal in Windows file names.
2315 	 * Todo: CATIA mappings?
2316 	 */
2317 	ill = illegal_chars;
2318 	if (dnp->n_flag & N_XATTR)
2319 		ill++; /* allow colon */
2320 	if (strpbrk(nm, ill))
2321 		return (EINVAL);
2322 
2323 	/*
2324 	 * Special handling for lookup of ".."
2325 	 *
2326 	 * We keep full pathnames (as seen on the server)
2327 	 * so we can just trim off the last component to
2328 	 * get the full pathname of the parent.  Note:
2329 	 * We don't actually copy and modify, but just
2330 	 * compute the trimmed length and pass that with
2331 	 * the current dir path (not null terminated).
2332 	 *
2333 	 * We don't go over-the-wire to get attributes
2334 	 * for ".." because we know it's a directory,
2335 	 * and we can just leave the rest "stale"
2336 	 * until someone does a getattr.
2337 	 */
2338 	if (nmlen == 2 && name[0] == '.' && name[1] == '.') {
2339 		if (dvp->v_flag & VROOT) {
2340 			/*
2341 			 * Already at the root.  This can happen
2342 			 * with directory listings at the root,
2343 			 * which lookup "." and ".." to get the
2344 			 * inode numbers.  Let ".." be the same
2345 			 * as "." in the FS root.
2346 			 */
2347 			VN_HOLD(dvp);
2348 			*vpp = dvp;
2349 			return (0);
2350 		}
2351 
2352 		/*
2353 		 * Special case for XATTR directory
2354 		 */
2355 		if (dvp->v_flag & V_XATTRDIR) {
2356 			error = smbfs_xa_parent(dvp, vpp);
2357 			return (error);
2358 		}
2359 
2360 		/*
2361 		 * Find the parent path length.
2362 		 */
2363 		rplen = dnp->n_rplen;
2364 		ASSERT(rplen > 0);
2365 		while (--rplen >= 0) {
2366 			if (dnp->n_rpath[rplen] == '\\')
2367 				break;
2368 		}
2369 		if (rplen <= 0) {
2370 			/* Found our way to the root. */
2371 			vp = SMBTOV(smi->smi_root);
2372 			VN_HOLD(vp);
2373 			*vpp = vp;
2374 			return (0);
2375 		}
2376 		np = smbfs_node_findcreate(smi,
2377 		    dnp->n_rpath, rplen, NULL, 0, 0,
2378 		    &smbfs_fattr0); /* force create */
2379 		ASSERT(np != NULL);
2380 		vp = SMBTOV(np);
2381 		vp->v_type = VDIR;
2382 
2383 		/* Success! */
2384 		*vpp = vp;
2385 		return (0);
2386 	}
2387 
2388 	/*
2389 	 * Normal lookup of a name under this directory.
2390 	 * Note we handled "", ".", ".." above.
2391 	 */
2392 	if (cache_ok) {
2393 		/*
2394 		 * The caller indicated that it's OK to use a
2395 		 * cached result for this lookup, so try to
2396 		 * reclaim a node from the smbfs node cache.
2397 		 */
2398 		error = smbfslookup_cache(dvp, nm, nmlen, &vp, cr);
2399 		if (error)
2400 			return (error);
2401 		if (vp != NULL) {
2402 			/* hold taken in lookup_cache */
2403 			*vpp = vp;
2404 			return (0);
2405 		}
2406 	}
2407 
2408 	/*
2409 	 * OK, go over-the-wire to get the attributes,
2410 	 * then create the node.
2411 	 */
2412 	smb_credinit(&scred, cr);
2413 	/* Note: this can allocate a new "name" */
2414 	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fa, &scred);
2415 	smb_credrele(&scred);
2416 	if (error == ENOTDIR) {
2417 		/*
2418 		 * Lookup failed because this directory was
2419 		 * removed or renamed by another client.
2420 		 * Remove any cached attributes under it.
2421 		 */
2422 		smbfs_attrcache_remove(dnp);
2423 		smbfs_attrcache_prune(dnp);
2424 	}
2425 	if (error)
2426 		goto out;
2427 
2428 	error = smbfs_nget(dvp, name, nmlen, &fa, &vp);
2429 	if (error)
2430 		goto out;
2431 
2432 	/* Success! */
2433 	*vpp = vp;
2434 
2435 out:
2436 	/* smbfs_smb_lookup may have allocated name. */
2437 	if (name != nm)
2438 		smbfs_name_free(name, nmlen);
2439 
2440 	return (error);
2441 }
2442 
2443 /*
2444  * smbfslookup_cache
2445  *
2446  * Try to reclaim a node from the smbfs node cache.
2447  * Some statistics for DEBUG.
2448  *
2449  * This mechanism lets us avoid many of the five (or more)
2450  * OtW lookup calls per file seen with "ls -l" if we search
2451  * the smbfs node cache for recently inactive(ated) nodes.
2452  */
2453 #ifdef DEBUG
2454 int smbfs_lookup_cache_calls = 0;
2455 int smbfs_lookup_cache_error = 0;
2456 int smbfs_lookup_cache_miss = 0;
2457 int smbfs_lookup_cache_stale = 0;
2458 int smbfs_lookup_cache_hits = 0;
2459 #endif /* DEBUG */
2460 
2461 /* ARGSUSED */
2462 static int
2463 smbfslookup_cache(vnode_t *dvp, char *nm, int nmlen,
2464 	vnode_t **vpp, cred_t *cr)
2465 {
2466 	struct vattr va;
2467 	smbnode_t *dnp;
2468 	smbnode_t *np;
2469 	vnode_t *vp;
2470 	int error;
2471 	char sep;
2472 
2473 	dnp = VTOSMB(dvp);
2474 	*vpp = NULL;
2475 
2476 #ifdef DEBUG
2477 	smbfs_lookup_cache_calls++;
2478 #endif
2479 
2480 	/*
2481 	 * First make sure we can get attributes for the
2482 	 * directory.  Cached attributes are OK here.
2483 	 * If we removed or renamed the directory, this
2484 	 * will return ENOENT.  If someone else removed
2485 	 * this directory or file, we'll find out when we
2486 	 * try to open or get attributes.
2487 	 */
2488 	va.va_mask = AT_TYPE | AT_MODE;
2489 	error = smbfsgetattr(dvp, &va, cr);
2490 	if (error) {
2491 #ifdef DEBUG
2492 		smbfs_lookup_cache_error++;
2493 #endif
2494 		return (error);
2495 	}
2496 
2497 	/*
2498 	 * Passing NULL smbfattr here so we will
2499 	 * just look, not create.
2500 	 */
2501 	sep = SMBFS_DNP_SEP(dnp);
2502 	np = smbfs_node_findcreate(dnp->n_mount,
2503 	    dnp->n_rpath, dnp->n_rplen,
2504 	    nm, nmlen, sep, NULL);
2505 	if (np == NULL) {
2506 #ifdef DEBUG
2507 		smbfs_lookup_cache_miss++;
2508 #endif
2509 		return (0);
2510 	}
2511 
2512 	/*
2513 	 * Found it.  Attributes still valid?
2514 	 */
2515 	vp = SMBTOV(np);
2516 	if (np->r_attrtime <= gethrtime()) {
2517 		/* stale */
2518 #ifdef DEBUG
2519 		smbfs_lookup_cache_stale++;
2520 #endif
2521 		VN_RELE(vp);
2522 		return (0);
2523 	}
2524 
2525 	/*
2526 	 * Success!
2527 	 * Caller gets hold from smbfs_node_findcreate
2528 	 */
2529 #ifdef DEBUG
2530 	smbfs_lookup_cache_hits++;
2531 #endif
2532 	*vpp = vp;
2533 	return (0);
2534 }
2535 
2536 
2537 /*
2538  * XXX
2539  * vsecattr_t is new to build 77, and we need to eventually support
2540  * it in order to create an ACL when an object is created.
2541  *
2542  * This op should support the new FIGNORECASE flag for case-insensitive
2543  * lookups, per PSARC 2007/244.
2544  */
2545 /* ARGSUSED */
2546 static int
2547 smbfs_create(vnode_t *dvp, char *nm, struct vattr *va, enum vcexcl exclusive,
2548 	int mode, vnode_t **vpp, cred_t *cr, int lfaware, caller_context_t *ct,
2549 	vsecattr_t *vsecp)
2550 {
2551 	int		error;
2552 	int		cerror;
2553 	vfs_t		*vfsp;
2554 	vnode_t		*vp;
2555 	smbnode_t	*np;
2556 	smbnode_t	*dnp;
2557 	smbmntinfo_t	*smi;
2558 	struct vattr	vattr;
2559 	struct smbfattr	fattr;
2560 	struct smb_cred	scred;
2561 	const char *name = (const char *)nm;
2562 	int		nmlen = strlen(nm);
2563 	uint32_t	disp;
2564 	uint16_t	fid;
2565 	int		xattr;
2566 
2567 	vfsp = dvp->v_vfsp;
2568 	smi = VFTOSMI(vfsp);
2569 	dnp = VTOSMB(dvp);
2570 	vp = NULL;
2571 
2572 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2573 		return (EPERM);
2574 
2575 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
2576 		return (EIO);
2577 
2578 	/*
2579 	 * Note: this may break mknod(2) calls to create a directory,
2580 	 * but that's obscure use.  Some other filesystems do this.
2581 	 * Todo: redirect VDIR type here to _mkdir.
2582 	 */
2583 	if (va->va_type != VREG)
2584 		return (EINVAL);
2585 
2586 	/*
2587 	 * If the pathname is "", just use dvp, no checks.
2588 	 * Do this outside of the rwlock (like zfs).
2589 	 */
2590 	if (nmlen == 0) {
2591 		VN_HOLD(dvp);
2592 		*vpp = dvp;
2593 		return (0);
2594 	}
2595 
2596 	/* Don't allow "." or ".." through here. */
2597 	if ((nmlen == 1 && name[0] == '.') ||
2598 	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
2599 		return (EISDIR);
2600 
2601 	/*
2602 	 * We make a copy of the attributes because the caller does not
2603 	 * expect us to change what va points to.
2604 	 */
2605 	vattr = *va;
2606 
2607 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2608 		return (EINTR);
2609 	smb_credinit(&scred, cr);
2610 
2611 	/*
2612 	 * NFS needs to go over the wire, just to be sure whether the
2613 	 * file exists or not.  Using a cached result is dangerous in
2614 	 * this case when making a decision regarding existence.
2615 	 *
2616 	 * The SMB protocol does NOT really need to go OTW here
2617 	 * thanks to the expressive NTCREATE disposition values.
2618 	 * Unfortunately, to do Unix access checks correctly,
2619 	 * we need to know if the object already exists.
2620 	 * When the object does not exist, we need VWRITE on
2621 	 * the directory.  Note: smbfslookup() checks VEXEC.
2622 	 */
2623 	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2624 	if (error == 0) {
2625 		/*
2626 		 * The file already exists.  Error?
2627 		 * NB: have a hold from smbfslookup
2628 		 */
2629 		if (exclusive == EXCL) {
2630 			error = EEXIST;
2631 			VN_RELE(vp);
2632 			goto out;
2633 		}
2634 		/*
2635 		 * Verify requested access.
2636 		 */
2637 		error = smbfs_access(vp, mode, 0, cr, ct);
2638 		if (error) {
2639 			VN_RELE(vp);
2640 			goto out;
2641 		}
2642 
2643 		/*
2644 		 * Truncate (if requested).
2645 		 */
2646 		if ((vattr.va_mask & AT_SIZE) && vp->v_type == VREG) {
2647 			np = VTOSMB(vp);
2648 			/*
2649 			 * Check here for large file truncation by
2650 			 * LF-unaware process, like ufs_create().
2651 			 */
2652 			if (!(lfaware & FOFFMAX)) {
2653 				mutex_enter(&np->r_statelock);
2654 				if (np->r_size > MAXOFF32_T)
2655 					error = EOVERFLOW;
2656 				mutex_exit(&np->r_statelock);
2657 			}
2658 			if (error) {
2659 				VN_RELE(vp);
2660 				goto out;
2661 			}
2662 			vattr.va_mask = AT_SIZE;
2663 			error = smbfssetattr(vp, &vattr, 0, cr);
2664 			if (error) {
2665 				VN_RELE(vp);
2666 				goto out;
2667 			}
2668 #ifdef	SMBFS_VNEVENT
2669 			/* Existing file was truncated */
2670 			vnevent_create(vp, ct);
2671 #endif
2672 			/* invalidate pages done in smbfssetattr() */
2673 		}
2674 		/* Success! */
2675 		*vpp = vp;
2676 		goto out;
2677 	}
2678 
2679 	/*
2680 	 * The file did not exist.  Need VWRITE in the directory.
2681 	 */
2682 	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
2683 	if (error)
2684 		goto out;
2685 
2686 	/*
2687 	 * Now things get tricky.  We also need to check the
2688 	 * requested open mode against the file we may create.
2689 	 * See comments at smbfs_access_rwx
2690 	 */
2691 	error = smbfs_access_rwx(vfsp, VREG, mode, cr);
2692 	if (error)
2693 		goto out;
2694 
2695 	/*
2696 	 * Now the code derived from Darwin,
2697 	 * but with greater use of NT_CREATE
2698 	 * disposition options.  Much changed.
2699 	 *
2700 	 * Create (or open) a new child node.
2701 	 * Note we handled "." and ".." above.
2702 	 */
2703 
2704 	if (exclusive == EXCL)
2705 		disp = NTCREATEX_DISP_CREATE;
2706 	else {
2707 		/* Truncate regular files if requested. */
2708 		if ((va->va_type == VREG) &&
2709 		    (va->va_mask & AT_SIZE) &&
2710 		    (va->va_size == 0))
2711 			disp = NTCREATEX_DISP_OVERWRITE_IF;
2712 		else
2713 			disp = NTCREATEX_DISP_OPEN_IF;
2714 	}
2715 	xattr = (dnp->n_flag & N_XATTR) ? 1 : 0;
2716 	error = smbfs_smb_create(dnp,
2717 	    name, nmlen, xattr,
2718 	    disp, &scred, &fid);
2719 	if (error)
2720 		goto out;
2721 
2722 	/*
2723 	 * Should use the fid to get/set the size
2724 	 * while we have it opened here.  See above.
2725 	 */
2726 
2727 	cerror = smbfs_smb_close(smi->smi_share, fid, NULL, &scred);
2728 	if (cerror)
2729 		SMBVDEBUG("error %d closing %s\\%s\n",
2730 		    cerror, dnp->n_rpath, name);
2731 
2732 	/*
2733 	 * In the open case, the name may differ a little
2734 	 * from what we passed to create (case, etc.)
2735 	 * so call lookup to get the (opened) name.
2736 	 *
2737 	 * XXX: Could avoid this extra lookup if the
2738 	 * "createact" result from NT_CREATE says we
2739 	 * created the object.
2740 	 */
2741 	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
2742 	if (error)
2743 		goto out;
2744 
2745 	/* update attr and directory cache */
2746 	smbfs_attr_touchdir(dnp);
2747 
2748 	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
2749 	if (error)
2750 		goto out;
2751 
2752 	/* Success! */
2753 	*vpp = vp;
2754 	error = 0;
2755 
2756 out:
2757 	smb_credrele(&scred);
2758 	smbfs_rw_exit(&dnp->r_rwlock);
2759 	if (name != nm)
2760 		smbfs_name_free(name, nmlen);
2761 	return (error);
2762 }
2763 
2764 /*
2765  * XXX
2766  * This op should support the new FIGNORECASE flag for case-insensitive
2767  * lookups, per PSARC 2007/244.
2768  */
2769 /* ARGSUSED */
2770 static int
2771 smbfs_remove(vnode_t *dvp, char *nm, cred_t *cr, caller_context_t *ct,
2772 	int flags)
2773 {
2774 	struct smb_cred	scred;
2775 	vnode_t		*vp = NULL;
2776 	smbnode_t	*dnp = VTOSMB(dvp);
2777 	smbmntinfo_t	*smi = VTOSMI(dvp);
2778 	int		error;
2779 
2780 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
2781 		return (EPERM);
2782 
2783 	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2784 		return (EIO);
2785 
2786 	/*
2787 	 * Verify access to the dirctory.
2788 	 */
2789 	error = smbfs_access(dvp, VWRITE|VEXEC, 0, cr, ct);
2790 	if (error)
2791 		return (error);
2792 
2793 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
2794 		return (EINTR);
2795 	smb_credinit(&scred, cr);
2796 
2797 	/* Lookup the file to remove. */
2798 	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
2799 	if (error == 0) {
2800 		/*
2801 		 * Do the real remove work
2802 		 */
2803 		error = smbfsremove(dvp, vp, &scred, flags);
2804 		VN_RELE(vp);
2805 	}
2806 
2807 	smb_credrele(&scred);
2808 	smbfs_rw_exit(&dnp->r_rwlock);
2809 
2810 	return (error);
2811 }
2812 
2813 /*
2814  * smbfsremove does the real work of removing in SMBFS
2815  * Caller has done dir access checks etc.
2816  *
2817  * The normal way to delete a file over SMB is open it (with DELETE access),
2818  * set the "delete-on-close" flag, and close the file.  The problem for Unix
2819  * applications is that they expect the file name to be gone once the unlink
2820  * completes, and the SMB server does not actually delete the file until ALL
2821  * opens of that file are closed.  We can't assume our open handles are the
2822  * only open handles on a file we're deleting, so to be safe we'll try to
2823  * rename the file to a temporary name and then set delete-on-close.  If we
2824  * fail to set delete-on-close (i.e. because other opens prevent it) then
2825  * undo the changes we made and give up with EBUSY.  Note that we might have
2826  * permission to delete a file but lack permission to rename, so we want to
2827  * continue in cases where rename fails.  As an optimization, only do the
2828  * rename when we have the file open.
2829  *
2830  * This is similar to what NFS does when deleting a file that has local opens,
2831  * but thanks to SMB delete-on-close, we don't need to keep track of when the
2832  * last local open goes away and send a delete.  The server does that for us.
2833  */
2834 /* ARGSUSED */
2835 static int
2836 smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
2837     int flags)
2838 {
2839 	smbnode_t	*dnp = VTOSMB(dvp);
2840 	smbnode_t	*np = VTOSMB(vp);
2841 	char		*tmpname = NULL;
2842 	int		tnlen;
2843 	int		error;
2844 	unsigned short	fid;
2845 	boolean_t	have_fid = B_FALSE;
2846 	boolean_t	renamed = B_FALSE;
2847 
2848 	/*
2849 	 * The dvp RWlock must be held as writer.
2850 	 */
2851 	ASSERT(dnp->r_rwlock.owner == curthread);
2852 
2853 	/* Never allow link/unlink directories on SMB. */
2854 	if (vp->v_type == VDIR)
2855 		return (EPERM);
2856 
2857 	/*
2858 	 * We need to flush any dirty pages which happen to
2859 	 * be hanging around before removing the file.  This
2860 	 * shouldn't happen very often and mostly on file
2861 	 * systems mounted "nocto".
2862 	 */
2863 	if (vn_has_cached_data(vp) &&
2864 	    ((np->r_flags & RDIRTY) || np->r_count > 0)) {
2865 		error = smbfs_putpage(vp, (offset_t)0, 0, 0,
2866 		    scred->scr_cred, NULL);
2867 		if (error && (error == ENOSPC || error == EDQUOT)) {
2868 			mutex_enter(&np->r_statelock);
2869 			if (!np->r_error)
2870 				np->r_error = error;
2871 			mutex_exit(&np->r_statelock);
2872 		}
2873 	}
2874 
2875 	/* Shared lock for n_fid use in smbfs_smb_setdisp etc. */
2876 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
2877 		return (EINTR);
2878 
2879 	/*
2880 	 * Get a file handle with delete access.
2881 	 * Close this FID before return.
2882 	 */
2883 	error = smbfs_smb_tmpopen(np, STD_RIGHT_DELETE_ACCESS,
2884 	    scred, &fid);
2885 	if (error) {
2886 		SMBVDEBUG("error %d opening %s\n",
2887 		    error, np->n_rpath);
2888 		goto out;
2889 	}
2890 	have_fid = B_TRUE;
2891 
2892 	/*
2893 	 * If we have the file open, try to rename it to a temporary name.
2894 	 * If we can't rename, continue on and try setting DoC anyway.
2895 	 */
2896 	if ((vp->v_count > 1) && (np->n_fidrefs > 0)) {
2897 		tmpname = kmem_alloc(MAXNAMELEN, KM_SLEEP);
2898 		tnlen = smbfs_newname(tmpname, MAXNAMELEN);
2899 		error = smbfs_smb_t2rename(np, tmpname, tnlen, scred, fid, 0);
2900 		if (error != 0) {
2901 			SMBVDEBUG("error %d renaming %s -> %s\n",
2902 			    error, np->n_rpath, tmpname);
2903 			/* Keep going without the rename. */
2904 		} else {
2905 			renamed = B_TRUE;
2906 		}
2907 	}
2908 
2909 	/*
2910 	 * Mark the file as delete-on-close.  If we can't,
2911 	 * undo what we did and err out.
2912 	 */
2913 	error = smbfs_smb_setdisp(np, fid, 1, scred);
2914 	if (error != 0) {
2915 		SMBVDEBUG("error %d setting DoC on %s\n",
2916 		    error, np->n_rpath);
2917 		/*
2918 		 * Failed to set DoC. If we renamed, undo that.
2919 		 * Need np->n_rpath relative to parent (dnp).
2920 		 * Use parent path name length plus one for
2921 		 * the separator ('/' or ':')
2922 		 */
2923 		if (renamed) {
2924 			char *oldname;
2925 			int oldnlen;
2926 			int err2;
2927 
2928 			oldname = np->n_rpath + (dnp->n_rplen + 1);
2929 			oldnlen = np->n_rplen - (dnp->n_rplen + 1);
2930 			err2 = smbfs_smb_t2rename(np, oldname, oldnlen,
2931 			    scred, fid, 0);
2932 			SMBVDEBUG("error %d un-renaming %s -> %s\n",
2933 			    err2, tmpname, np->n_rpath);
2934 		}
2935 		error = EBUSY;
2936 		goto out;
2937 	}
2938 	/* Done! */
2939 	smbfs_attrcache_prune(np);
2940 
2941 #ifdef	SMBFS_VNEVENT
2942 	vnevent_remove(vp, dvp, nm, ct);
2943 #endif
2944 
2945 out:
2946 	if (tmpname != NULL)
2947 		kmem_free(tmpname, MAXNAMELEN);
2948 
2949 	if (have_fid)
2950 		(void) smbfs_smb_tmpclose(np, fid, scred);
2951 	smbfs_rw_exit(&np->r_lkserlock);
2952 
2953 	if (error == 0) {
2954 		/* Keep lookup from finding this node anymore. */
2955 		smbfs_rmhash(np);
2956 	}
2957 
2958 	return (error);
2959 }
2960 
2961 
2962 /* ARGSUSED */
2963 static int
2964 smbfs_link(vnode_t *tdvp, vnode_t *svp, char *tnm, cred_t *cr,
2965 	caller_context_t *ct, int flags)
2966 {
2967 	/* Not yet... */
2968 	return (ENOSYS);
2969 }
2970 
2971 
2972 /*
2973  * XXX
2974  * This op should support the new FIGNORECASE flag for case-insensitive
2975  * lookups, per PSARC 2007/244.
2976  */
2977 /* ARGSUSED */
2978 static int
2979 smbfs_rename(vnode_t *odvp, char *onm, vnode_t *ndvp, char *nnm, cred_t *cr,
2980 	caller_context_t *ct, int flags)
2981 {
2982 	struct smb_cred	scred;
2983 	smbnode_t	*odnp = VTOSMB(odvp);
2984 	smbnode_t	*ndnp = VTOSMB(ndvp);
2985 	vnode_t		*ovp;
2986 	int error;
2987 
2988 	if (curproc->p_zone != VTOSMI(odvp)->smi_zone_ref.zref_zone ||
2989 	    curproc->p_zone != VTOSMI(ndvp)->smi_zone_ref.zref_zone)
2990 		return (EPERM);
2991 
2992 	if (VTOSMI(odvp)->smi_flags & SMI_DEAD ||
2993 	    VTOSMI(ndvp)->smi_flags & SMI_DEAD ||
2994 	    odvp->v_vfsp->vfs_flag & VFS_UNMOUNTED ||
2995 	    ndvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
2996 		return (EIO);
2997 
2998 	if (strcmp(onm, ".") == 0 || strcmp(onm, "..") == 0 ||
2999 	    strcmp(nnm, ".") == 0 || strcmp(nnm, "..") == 0)
3000 		return (EINVAL);
3001 
3002 	/*
3003 	 * Check that everything is on the same filesystem.
3004 	 * vn_rename checks the fsid's, but in case we don't
3005 	 * fill those in correctly, check here too.
3006 	 */
3007 	if (odvp->v_vfsp != ndvp->v_vfsp)
3008 		return (EXDEV);
3009 
3010 	/*
3011 	 * Need write access on source and target.
3012 	 * Server takes care of most checks.
3013 	 */
3014 	error = smbfs_access(odvp, VWRITE|VEXEC, 0, cr, ct);
3015 	if (error)
3016 		return (error);
3017 	if (odvp != ndvp) {
3018 		error = smbfs_access(ndvp, VWRITE, 0, cr, ct);
3019 		if (error)
3020 			return (error);
3021 	}
3022 
3023 	/*
3024 	 * Need to lock both old/new dirs as writer.
3025 	 *
3026 	 * Avoid deadlock here on old vs new directory nodes
3027 	 * by always taking the locks in order of address.
3028 	 * The order is arbitrary, but must be consistent.
3029 	 */
3030 	if (odnp < ndnp) {
3031 		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
3032 		    SMBINTR(odvp)))
3033 			return (EINTR);
3034 		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
3035 		    SMBINTR(ndvp))) {
3036 			smbfs_rw_exit(&odnp->r_rwlock);
3037 			return (EINTR);
3038 		}
3039 	} else {
3040 		if (smbfs_rw_enter_sig(&ndnp->r_rwlock, RW_WRITER,
3041 		    SMBINTR(ndvp)))
3042 			return (EINTR);
3043 		if (smbfs_rw_enter_sig(&odnp->r_rwlock, RW_WRITER,
3044 		    SMBINTR(odvp))) {
3045 			smbfs_rw_exit(&ndnp->r_rwlock);
3046 			return (EINTR);
3047 		}
3048 	}
3049 	smb_credinit(&scred, cr);
3050 
3051 	/* Lookup the "old" name */
3052 	error = smbfslookup(odvp, onm, &ovp, cr, 0, ct);
3053 	if (error == 0) {
3054 		/*
3055 		 * Do the real rename work
3056 		 */
3057 		error = smbfsrename(odvp, ovp, ndvp, nnm, &scred, flags);
3058 		VN_RELE(ovp);
3059 	}
3060 
3061 	smb_credrele(&scred);
3062 	smbfs_rw_exit(&odnp->r_rwlock);
3063 	smbfs_rw_exit(&ndnp->r_rwlock);
3064 
3065 	return (error);
3066 }
3067 
3068 /*
3069  * smbfsrename does the real work of renaming in SMBFS
3070  * Caller has done dir access checks etc.
3071  */
3072 /* ARGSUSED */
3073 static int
3074 smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp, char *nnm,
3075     struct smb_cred *scred, int flags)
3076 {
3077 	smbnode_t	*odnp = VTOSMB(odvp);
3078 	smbnode_t	*onp = VTOSMB(ovp);
3079 	smbnode_t	*ndnp = VTOSMB(ndvp);
3080 	vnode_t		*nvp = NULL;
3081 	int		error;
3082 	int		nvp_locked = 0;
3083 
3084 	/* Things our caller should have checked. */
3085 	ASSERT(curproc->p_zone == VTOSMI(odvp)->smi_zone_ref.zref_zone);
3086 	ASSERT(odvp->v_vfsp == ndvp->v_vfsp);
3087 	ASSERT(odnp->r_rwlock.owner == curthread);
3088 	ASSERT(ndnp->r_rwlock.owner == curthread);
3089 
3090 	/*
3091 	 * Lookup the target file.  If it exists, it needs to be
3092 	 * checked to see whether it is a mount point and whether
3093 	 * it is active (open).
3094 	 */
3095 	error = smbfslookup(ndvp, nnm, &nvp, scred->scr_cred, 0, NULL);
3096 	if (!error) {
3097 		/*
3098 		 * Target (nvp) already exists.  Check that it
3099 		 * has the same type as the source.  The server
3100 		 * will check this also, (and more reliably) but
3101 		 * this lets us return the correct error codes.
3102 		 */
3103 		if (ovp->v_type == VDIR) {
3104 			if (nvp->v_type != VDIR) {
3105 				error = ENOTDIR;
3106 				goto out;
3107 			}
3108 		} else {
3109 			if (nvp->v_type == VDIR) {
3110 				error = EISDIR;
3111 				goto out;
3112 			}
3113 		}
3114 
3115 		/*
3116 		 * POSIX dictates that when the source and target
3117 		 * entries refer to the same file object, rename
3118 		 * must do nothing and exit without error.
3119 		 */
3120 		if (ovp == nvp) {
3121 			error = 0;
3122 			goto out;
3123 		}
3124 
3125 		/*
3126 		 * Also must ensure the target is not a mount point,
3127 		 * and keep mount/umount away until we're done.
3128 		 */
3129 		if (vn_vfsrlock(nvp)) {
3130 			error = EBUSY;
3131 			goto out;
3132 		}
3133 		nvp_locked = 1;
3134 		if (vn_mountedvfs(nvp) != NULL) {
3135 			error = EBUSY;
3136 			goto out;
3137 		}
3138 
3139 		/*
3140 		 * CIFS may give a SHARING_VIOLATION error when
3141 		 * trying to rename onto an exising object,
3142 		 * so try to remove the target first.
3143 		 * (Only for files, not directories.)
3144 		 */
3145 		if (nvp->v_type == VDIR) {
3146 			error = EEXIST;
3147 			goto out;
3148 		}
3149 		error = smbfsremove(ndvp, nvp, scred, flags);
3150 		if (error != 0)
3151 			goto out;
3152 
3153 		/*
3154 		 * OK, removed the target file.  Continue as if
3155 		 * lookup target had failed (nvp == NULL).
3156 		 */
3157 		vn_vfsunlock(nvp);
3158 		nvp_locked = 0;
3159 		VN_RELE(nvp);
3160 		nvp = NULL;
3161 	} /* nvp */
3162 
3163 	smbfs_attrcache_remove(onp);
3164 	error = smbfs_smb_rename(onp, ndnp, nnm, strlen(nnm), scred);
3165 
3166 	/*
3167 	 * If the old name should no longer exist,
3168 	 * discard any cached attributes under it.
3169 	 */
3170 	if (error == 0) {
3171 		smbfs_attrcache_prune(onp);
3172 		/* SMBFS_VNEVENT... */
3173 	}
3174 
3175 out:
3176 	if (nvp) {
3177 		if (nvp_locked)
3178 			vn_vfsunlock(nvp);
3179 		VN_RELE(nvp);
3180 	}
3181 
3182 	return (error);
3183 }
3184 
3185 /*
3186  * XXX
3187  * vsecattr_t is new to build 77, and we need to eventually support
3188  * it in order to create an ACL when an object is created.
3189  *
3190  * This op should support the new FIGNORECASE flag for case-insensitive
3191  * lookups, per PSARC 2007/244.
3192  */
3193 /* ARGSUSED */
3194 static int
3195 smbfs_mkdir(vnode_t *dvp, char *nm, struct vattr *va, vnode_t **vpp,
3196 	cred_t *cr, caller_context_t *ct, int flags, vsecattr_t *vsecp)
3197 {
3198 	vnode_t		*vp;
3199 	struct smbnode	*dnp = VTOSMB(dvp);
3200 	struct smbmntinfo *smi = VTOSMI(dvp);
3201 	struct smb_cred	scred;
3202 	struct smbfattr	fattr;
3203 	const char		*name = (const char *) nm;
3204 	int		nmlen = strlen(name);
3205 	int		error, hiderr;
3206 
3207 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3208 		return (EPERM);
3209 
3210 	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3211 		return (EIO);
3212 
3213 	if ((nmlen == 1 && name[0] == '.') ||
3214 	    (nmlen == 2 && name[0] == '.' && name[1] == '.'))
3215 		return (EEXIST);
3216 
3217 	/* Only plain files are allowed in V_XATTRDIR. */
3218 	if (dvp->v_flag & V_XATTRDIR)
3219 		return (EINVAL);
3220 
3221 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
3222 		return (EINTR);
3223 	smb_credinit(&scred, cr);
3224 
3225 	/*
3226 	 * Require write access in the containing directory.
3227 	 */
3228 	error = smbfs_access(dvp, VWRITE, 0, cr, ct);
3229 	if (error)
3230 		goto out;
3231 
3232 	error = smbfs_smb_mkdir(dnp, name, nmlen, &scred);
3233 	if (error)
3234 		goto out;
3235 
3236 	error = smbfs_smb_lookup(dnp, &name, &nmlen, &fattr, &scred);
3237 	if (error)
3238 		goto out;
3239 
3240 	smbfs_attr_touchdir(dnp);
3241 
3242 	error = smbfs_nget(dvp, name, nmlen, &fattr, &vp);
3243 	if (error)
3244 		goto out;
3245 
3246 	if (name[0] == '.')
3247 		if ((hiderr = smbfs_smb_hideit(VTOSMB(vp), NULL, 0, &scred)))
3248 			SMBVDEBUG("hide failure %d\n", hiderr);
3249 
3250 	/* Success! */
3251 	*vpp = vp;
3252 	error = 0;
3253 out:
3254 	smb_credrele(&scred);
3255 	smbfs_rw_exit(&dnp->r_rwlock);
3256 
3257 	if (name != nm)
3258 		smbfs_name_free(name, nmlen);
3259 
3260 	return (error);
3261 }
3262 
3263 /*
3264  * XXX
3265  * This op should support the new FIGNORECASE flag for case-insensitive
3266  * lookups, per PSARC 2007/244.
3267  */
3268 /* ARGSUSED */
3269 static int
3270 smbfs_rmdir(vnode_t *dvp, char *nm, vnode_t *cdir, cred_t *cr,
3271 	caller_context_t *ct, int flags)
3272 {
3273 	vnode_t		*vp = NULL;
3274 	int		vp_locked = 0;
3275 	struct smbmntinfo *smi = VTOSMI(dvp);
3276 	struct smbnode	*dnp = VTOSMB(dvp);
3277 	struct smbnode	*np;
3278 	struct smb_cred	scred;
3279 	int		error;
3280 
3281 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3282 		return (EPERM);
3283 
3284 	if (smi->smi_flags & SMI_DEAD || dvp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3285 		return (EIO);
3286 
3287 	if (smbfs_rw_enter_sig(&dnp->r_rwlock, RW_WRITER, SMBINTR(dvp)))
3288 		return (EINTR);
3289 	smb_credinit(&scred, cr);
3290 
3291 	/*
3292 	 * Require w/x access in the containing directory.
3293 	 * Server handles all other access checks.
3294 	 */
3295 	error = smbfs_access(dvp, VEXEC|VWRITE, 0, cr, ct);
3296 	if (error)
3297 		goto out;
3298 
3299 	/*
3300 	 * First lookup the entry to be removed.
3301 	 */
3302 	error = smbfslookup(dvp, nm, &vp, cr, 0, ct);
3303 	if (error)
3304 		goto out;
3305 	np = VTOSMB(vp);
3306 
3307 	/*
3308 	 * Disallow rmdir of "." or current dir, or the FS root.
3309 	 * Also make sure it's a directory, not a mount point,
3310 	 * and lock to keep mount/umount away until we're done.
3311 	 */
3312 	if ((vp == dvp) || (vp == cdir) || (vp->v_flag & VROOT)) {
3313 		error = EINVAL;
3314 		goto out;
3315 	}
3316 	if (vp->v_type != VDIR) {
3317 		error = ENOTDIR;
3318 		goto out;
3319 	}
3320 	if (vn_vfsrlock(vp)) {
3321 		error = EBUSY;
3322 		goto out;
3323 	}
3324 	vp_locked = 1;
3325 	if (vn_mountedvfs(vp) != NULL) {
3326 		error = EBUSY;
3327 		goto out;
3328 	}
3329 
3330 	smbfs_attrcache_remove(np);
3331 	error = smbfs_smb_rmdir(np, &scred);
3332 
3333 	/*
3334 	 * Similar to smbfs_remove
3335 	 */
3336 	switch (error) {
3337 	case 0:
3338 	case ENOENT:
3339 	case ENOTDIR:
3340 		smbfs_attrcache_prune(np);
3341 		break;
3342 	}
3343 
3344 	if (error)
3345 		goto out;
3346 
3347 	mutex_enter(&np->r_statelock);
3348 	dnp->n_flag |= NMODIFIED;
3349 	mutex_exit(&np->r_statelock);
3350 	smbfs_attr_touchdir(dnp);
3351 	smbfs_rmhash(np);
3352 
3353 out:
3354 	if (vp) {
3355 		if (vp_locked)
3356 			vn_vfsunlock(vp);
3357 		VN_RELE(vp);
3358 	}
3359 	smb_credrele(&scred);
3360 	smbfs_rw_exit(&dnp->r_rwlock);
3361 
3362 	return (error);
3363 }
3364 
3365 
3366 /* ARGSUSED */
3367 static int
3368 smbfs_symlink(vnode_t *dvp, char *lnm, struct vattr *tva, char *tnm, cred_t *cr,
3369 	caller_context_t *ct, int flags)
3370 {
3371 	/* Not yet... */
3372 	return (ENOSYS);
3373 }
3374 
3375 
3376 /* ARGSUSED */
3377 static int
3378 smbfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
3379 	caller_context_t *ct, int flags)
3380 {
3381 	struct smbnode	*np = VTOSMB(vp);
3382 	int		error = 0;
3383 	smbmntinfo_t	*smi;
3384 
3385 	smi = VTOSMI(vp);
3386 
3387 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3388 		return (EIO);
3389 
3390 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3391 		return (EIO);
3392 
3393 	/*
3394 	 * Require read access in the directory.
3395 	 */
3396 	error = smbfs_access(vp, VREAD, 0, cr, ct);
3397 	if (error)
3398 		return (error);
3399 
3400 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));
3401 
3402 	/*
3403 	 * Todo readdir cache here
3404 	 *
3405 	 * I am serializing the entire readdir opreation
3406 	 * now since we have not yet implemented readdir
3407 	 * cache. This fix needs to be revisited once
3408 	 * we implement readdir cache.
3409 	 */
3410 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
3411 		return (EINTR);
3412 
3413 	error = smbfs_readvdir(vp, uiop, cr, eofp, ct);
3414 
3415 	smbfs_rw_exit(&np->r_lkserlock);
3416 
3417 	return (error);
3418 }
3419 
3420 /* ARGSUSED */
3421 static int
3422 smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
3423 	caller_context_t *ct)
3424 {
3425 	/*
3426 	 * Note: "limit" tells the SMB-level FindFirst/FindNext
3427 	 * functions how many directory entries to request in
3428 	 * each OtW call.  It needs to be large enough so that
3429 	 * we don't make lots of tiny OtW requests, but there's
3430 	 * no point making it larger than the maximum number of
3431 	 * OtW entries that would fit in a maximum sized trans2
3432 	 * response (64k / 48).  Beyond that, it's just tuning.
3433 	 * WinNT used 512, Win2k used 1366.  We use 1000.
3434 	 */
3435 	static const int limit = 1000;
3436 	/* Largest possible dirent size. */
3437 	static const size_t dbufsiz = DIRENT64_RECLEN(SMB_MAXFNAMELEN);
3438 	struct smb_cred scred;
3439 	vnode_t		*newvp;
3440 	struct smbnode	*np = VTOSMB(vp);
3441 	struct smbfs_fctx *ctx;
3442 	struct dirent64 *dp;
3443 	ssize_t		save_resid;
3444 	offset_t	save_offset; /* 64 bits */
3445 	int		offset; /* yes, 32 bits */
3446 	int		nmlen, error;
3447 	ushort_t	reclen;
3448 
3449 	ASSERT(curproc->p_zone == VTOSMI(vp)->smi_zone_ref.zref_zone);
3450 
3451 	/* Make sure we serialize for n_dirseq use. */
3452 	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));
3453 
3454 	/*
3455 	 * Make sure smbfs_open filled in n_dirseq
3456 	 */
3457 	if (np->n_dirseq == NULL)
3458 		return (EBADF);
3459 
3460 	/* Check for overflow of (32-bit) directory offset. */
3461 	if (uio->uio_loffset < 0 || uio->uio_loffset > INT32_MAX ||
3462 	    (uio->uio_loffset + uio->uio_resid) > INT32_MAX)
3463 		return (EINVAL);
3464 
3465 	/* Require space for at least one dirent. */
3466 	if (uio->uio_resid < dbufsiz)
3467 		return (EINVAL);
3468 
3469 	SMBVDEBUG("dirname='%s'\n", np->n_rpath);
3470 	smb_credinit(&scred, cr);
3471 	dp = kmem_alloc(dbufsiz, KM_SLEEP);
3472 
3473 	save_resid = uio->uio_resid;
3474 	save_offset = uio->uio_loffset;
3475 	offset = uio->uio_offset;
3476 	SMBVDEBUG("in: offset=%d, resid=%d\n",
3477 	    (int)uio->uio_offset, (int)uio->uio_resid);
3478 	error = 0;
3479 
3480 	/*
3481 	 * Generate the "." and ".." entries here so we can
3482 	 * (1) make sure they appear (but only once), and
3483 	 * (2) deal with getting their I numbers which the
3484 	 * findnext below does only for normal names.
3485 	 */
3486 	while (offset < FIRST_DIROFS) {
3487 		/*
3488 		 * Tricky bit filling in the first two:
3489 		 * offset 0 is ".", offset 1 is ".."
3490 		 * so strlen of these is offset+1.
3491 		 */
3492 		reclen = DIRENT64_RECLEN(offset + 1);
3493 		if (uio->uio_resid < reclen)
3494 			goto out;
3495 		bzero(dp, reclen);
3496 		dp->d_reclen = reclen;
3497 		dp->d_name[0] = '.';
3498 		dp->d_name[1] = '.';
3499 		dp->d_name[offset + 1] = '\0';
3500 		/*
3501 		 * Want the real I-numbers for the "." and ".."
3502 		 * entries.  For these two names, we know that
3503 		 * smbfslookup can get the nodes efficiently.
3504 		 */
3505 		error = smbfslookup(vp, dp->d_name, &newvp, cr, 1, ct);
3506 		if (error) {
3507 			dp->d_ino = np->n_ino + offset; /* fiction */
3508 		} else {
3509 			dp->d_ino = VTOSMB(newvp)->n_ino;
3510 			VN_RELE(newvp);
3511 		}
3512 		/*
3513 		 * Note: d_off is the offset that a user-level program
3514 		 * should seek to for reading the NEXT directory entry.
3515 		 * See libc: readdir, telldir, seekdir
3516 		 */
3517 		dp->d_off = offset + 1;
3518 		error = uiomove(dp, reclen, UIO_READ, uio);
3519 		if (error)
3520 			goto out;
3521 		/*
3522 		 * Note: uiomove updates uio->uio_offset,
3523 		 * but we want it to be our "cookie" value,
3524 		 * which just counts dirents ignoring size.
3525 		 */
3526 		uio->uio_offset = ++offset;
3527 	}
3528 
3529 	/*
3530 	 * If there was a backward seek, we have to reopen.
3531 	 */
3532 	if (offset < np->n_dirofs) {
3533 		SMBVDEBUG("Reopening search %d:%d\n",
3534 		    offset, np->n_dirofs);
3535 		error = smbfs_smb_findopen(np, "*", 1,
3536 		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
3537 		    &scred, &ctx);
3538 		if (error) {
3539 			SMBVDEBUG("can not open search, error = %d", error);
3540 			goto out;
3541 		}
3542 		/* free the old one */
3543 		(void) smbfs_smb_findclose(np->n_dirseq, &scred);
3544 		/* save the new one */
3545 		np->n_dirseq = ctx;
3546 		np->n_dirofs = FIRST_DIROFS;
3547 	} else {
3548 		ctx = np->n_dirseq;
3549 	}
3550 
3551 	/*
3552 	 * Skip entries before the requested offset.
3553 	 */
3554 	while (np->n_dirofs < offset) {
3555 		error = smbfs_smb_findnext(ctx, limit, &scred);
3556 		if (error != 0)
3557 			goto out;
3558 		np->n_dirofs++;
3559 	}
3560 
3561 	/*
3562 	 * While there's room in the caller's buffer:
3563 	 *	get a directory entry from SMB,
3564 	 *	convert to a dirent, copyout.
3565 	 * We stop when there is no longer room for a
3566 	 * maximum sized dirent because we must decide
3567 	 * before we know anything about the next entry.
3568 	 */
3569 	while (uio->uio_resid >= dbufsiz) {
3570 		error = smbfs_smb_findnext(ctx, limit, &scred);
3571 		if (error != 0)
3572 			goto out;
3573 		np->n_dirofs++;
3574 
3575 		/* Sanity check the name length. */
3576 		nmlen = ctx->f_nmlen;
3577 		if (nmlen > SMB_MAXFNAMELEN) {
3578 			nmlen = SMB_MAXFNAMELEN;
3579 			SMBVDEBUG("Truncating name: %s\n", ctx->f_name);
3580 		}
3581 		if (smbfs_fastlookup) {
3582 			/* See comment at smbfs_fastlookup above. */
3583 			if (smbfs_nget(vp, ctx->f_name, nmlen,
3584 			    &ctx->f_attr, &newvp) == 0)
3585 				VN_RELE(newvp);
3586 		}
3587 
3588 		reclen = DIRENT64_RECLEN(nmlen);
3589 		bzero(dp, reclen);
3590 		dp->d_reclen = reclen;
3591 		bcopy(ctx->f_name, dp->d_name, nmlen);
3592 		dp->d_name[nmlen] = '\0';
3593 		dp->d_ino = ctx->f_inum;
3594 		dp->d_off = offset + 1;	/* See d_off comment above */
3595 		error = uiomove(dp, reclen, UIO_READ, uio);
3596 		if (error)
3597 			goto out;
3598 		/* See comment re. uio_offset above. */
3599 		uio->uio_offset = ++offset;
3600 	}
3601 
3602 out:
3603 	/*
3604 	 * When we come to the end of a directory, the
3605 	 * SMB-level functions return ENOENT, but the
3606 	 * caller is not expecting an error return.
3607 	 *
3608 	 * Also note that we must delay the call to
3609 	 * smbfs_smb_findclose(np->n_dirseq, ...)
3610 	 * until smbfs_close so that all reads at the
3611 	 * end of the directory will return no data.
3612 	 */
3613 	if (error == ENOENT) {
3614 		error = 0;
3615 		if (eofp)
3616 			*eofp = 1;
3617 	}
3618 	/*
3619 	 * If we encountered an error (i.e. "access denied")
3620 	 * from the FindFirst call, we will have copied out
3621 	 * the "." and ".." entries leaving offset == 2.
3622 	 * In that case, restore the original offset/resid
3623 	 * so the caller gets no data with the error.
3624 	 */
3625 	if (error != 0 && offset == FIRST_DIROFS) {
3626 		uio->uio_loffset = save_offset;
3627 		uio->uio_resid = save_resid;
3628 	}
3629 	SMBVDEBUG("out: offset=%d, resid=%d\n",
3630 	    (int)uio->uio_offset, (int)uio->uio_resid);
3631 
3632 	kmem_free(dp, dbufsiz);
3633 	smb_credrele(&scred);
3634 	return (error);
3635 }
3636 
3637 /*
3638  * Here NFS has: nfs3_bio
3639  * See smbfs_bio above.
3640  */
3641 
3642 /* ARGSUSED */
3643 static int
3644 smbfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
3645 {
3646 	return (ENOSYS);
3647 }
3648 
3649 
3650 /*
3651  * The pair of functions VOP_RWLOCK, VOP_RWUNLOCK
3652  * are optional functions that are called by:
3653  *    getdents, before/after VOP_READDIR
3654  *    pread, before/after ... VOP_READ
3655  *    pwrite, before/after ... VOP_WRITE
3656  *    (other places)
3657  *
3658  * Careful here: None of the above check for any
3659  * error returns from VOP_RWLOCK / VOP_RWUNLOCK!
3660  * In fact, the return value from _rwlock is NOT
3661  * an error code, but V_WRITELOCK_TRUE / _FALSE.
3662  *
3663  * Therefore, it's up to _this_ code to make sure
3664  * the lock state remains balanced, which means
3665  * we can't "bail out" on interrupts, etc.
3666  */
3667 
3668 /* ARGSUSED2 */
3669 static int
3670 smbfs_rwlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3671 {
3672 	smbnode_t	*np = VTOSMB(vp);
3673 
3674 	if (!write_lock) {
3675 		(void) smbfs_rw_enter_sig(&np->r_rwlock, RW_READER, FALSE);
3676 		return (V_WRITELOCK_FALSE);
3677 	}
3678 
3679 
3680 	(void) smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, FALSE);
3681 	return (V_WRITELOCK_TRUE);
3682 }
3683 
3684 /* ARGSUSED */
3685 static void
3686 smbfs_rwunlock(vnode_t *vp, int write_lock, caller_context_t *ctp)
3687 {
3688 	smbnode_t	*np = VTOSMB(vp);
3689 
3690 	smbfs_rw_exit(&np->r_rwlock);
3691 }
3692 
3693 
3694 /* ARGSUSED */
3695 static int
3696 smbfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, caller_context_t *ct)
3697 {
3698 	smbmntinfo_t	*smi;
3699 
3700 	smi = VTOSMI(vp);
3701 
3702 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3703 		return (EPERM);
3704 
3705 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3706 		return (EIO);
3707 
3708 	/*
3709 	 * Because we stuff the readdir cookie into the offset field
3710 	 * someone may attempt to do an lseek with the cookie which
3711 	 * we want to succeed.
3712 	 */
3713 	if (vp->v_type == VDIR)
3714 		return (0);
3715 
3716 	/* Like NFS3, just check for 63-bit overflow. */
3717 	if (*noffp < 0)
3718 		return (EINVAL);
3719 
3720 	return (0);
3721 }
3722 
3723 /* mmap support ******************************************************** */
3724 
3725 #ifdef	_KERNEL
3726 
3727 #ifdef DEBUG
3728 static int smbfs_lostpage = 0;	/* number of times we lost original page */
3729 #endif
3730 
3731 /*
3732  * Return all the pages from [off..off+len) in file
3733  * Like nfs3_getpage
3734  */
3735 /* ARGSUSED */
3736 static int
3737 smbfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
3738 	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3739 	enum seg_rw rw, cred_t *cr, caller_context_t *ct)
3740 {
3741 	smbnode_t	*np;
3742 	smbmntinfo_t	*smi;
3743 	int		error;
3744 
3745 	np = VTOSMB(vp);
3746 	smi = VTOSMI(vp);
3747 
3748 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3749 		return (EIO);
3750 
3751 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3752 		return (EIO);
3753 
3754 	if (vp->v_flag & VNOMAP)
3755 		return (ENOSYS);
3756 
3757 	if (protp != NULL)
3758 		*protp = PROT_ALL;
3759 
3760 	/*
3761 	 * Now valididate that the caches are up to date.
3762 	 */
3763 	error = smbfs_validate_caches(vp, cr);
3764 	if (error)
3765 		return (error);
3766 
3767 retry:
3768 	mutex_enter(&np->r_statelock);
3769 
3770 	/*
3771 	 * Don't create dirty pages faster than they
3772 	 * can be cleaned ... (etc. see nfs)
3773 	 *
3774 	 * Here NFS also tests:
3775 	 *  (mi->mi_max_threads != 0 &&
3776 	 *  rp->r_awcount > 2 * mi->mi_max_threads)
3777 	 */
3778 	if (rw == S_CREATE) {
3779 		while (np->r_gcount > 0)
3780 			cv_wait(&np->r_cv, &np->r_statelock);
3781 	}
3782 
3783 	/*
3784 	 * If we are getting called as a side effect of a write
3785 	 * operation the local file size might not be extended yet.
3786 	 * In this case we want to be able to return pages of zeroes.
3787 	 */
3788 	if (off + len > np->r_size + PAGEOFFSET && seg != segkmap) {
3789 		mutex_exit(&np->r_statelock);
3790 		return (EFAULT);		/* beyond EOF */
3791 	}
3792 
3793 	mutex_exit(&np->r_statelock);
3794 
3795 	error = pvn_getpages(smbfs_getapage, vp, off, len, protp,
3796 	    pl, plsz, seg, addr, rw, cr);
3797 
3798 	switch (error) {
3799 	case SMBFS_EOF:
3800 		smbfs_purge_caches(vp, cr);
3801 		goto retry;
3802 	case ESTALE:
3803 		/*
3804 		 * Here NFS has: PURGE_STALE_FH(error, vp, cr);
3805 		 * In-line here as we only use it once.
3806 		 */
3807 		mutex_enter(&np->r_statelock);
3808 		np->r_flags |= RSTALE;
3809 		if (!np->r_error)
3810 			np->r_error = (error);
3811 		mutex_exit(&np->r_statelock);
3812 		if (vn_has_cached_data(vp))
3813 			smbfs_invalidate_pages(vp, (u_offset_t)0, cr);
3814 		smbfs_purge_caches(vp, cr);
3815 		break;
3816 	default:
3817 		break;
3818 	}
3819 
3820 	return (error);
3821 }
3822 
3823 /*
3824  * Called from pvn_getpages to get a particular page.
3825  * Like nfs3_getapage
3826  */
3827 /* ARGSUSED */
3828 static int
3829 smbfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
3830 	page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
3831 	enum seg_rw rw, cred_t *cr)
3832 {
3833 	smbnode_t	*np;
3834 	smbmntinfo_t   *smi;
3835 
3836 	uint_t		bsize;
3837 	struct buf	*bp;
3838 	page_t		*pp;
3839 	u_offset_t	lbn;
3840 	u_offset_t	io_off;
3841 	u_offset_t	blkoff;
3842 	size_t		io_len;
3843 	uint_t blksize;
3844 	int error;
3845 	/* int readahead; */
3846 	int readahead_issued = 0;
3847 	/* int ra_window; * readahead window */
3848 	page_t *pagefound;
3849 
3850 	np = VTOSMB(vp);
3851 	smi = VTOSMI(vp);
3852 
3853 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
3854 		return (EIO);
3855 
3856 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
3857 		return (EIO);
3858 
3859 	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
3860 
3861 reread:
3862 	bp = NULL;
3863 	pp = NULL;
3864 	pagefound = NULL;
3865 
3866 	if (pl != NULL)
3867 		pl[0] = NULL;
3868 
3869 	error = 0;
3870 	lbn = off / bsize;
3871 	blkoff = lbn * bsize;
3872 
3873 	/*
3874 	 * NFS queues up readahead work here.
3875 	 */
3876 
3877 again:
3878 	if ((pagefound = page_exists(vp, off)) == NULL) {
3879 		if (pl == NULL) {
3880 			(void) 0; /* Todo: smbfs_async_readahead(); */
3881 		} else if (rw == S_CREATE) {
3882 			/*
3883 			 * Block for this page is not allocated, or the offset
3884 			 * is beyond the current allocation size, or we're
3885 			 * allocating a swap slot and the page was not found,
3886 			 * so allocate it and return a zero page.
3887 			 */
3888 			if ((pp = page_create_va(vp, off,
3889 			    PAGESIZE, PG_WAIT, seg, addr)) == NULL)
3890 				cmn_err(CE_PANIC, "smbfs_getapage: page_create");
3891 			io_len = PAGESIZE;
3892 			mutex_enter(&np->r_statelock);
3893 			np->r_nextr = off + PAGESIZE;
3894 			mutex_exit(&np->r_statelock);
3895 		} else {
3896 			/*
3897 			 * Need to go to server to get a BLOCK, exception to
3898 			 * that being while reading at offset = 0 or doing
3899 			 * random i/o, in that case read only a PAGE.
3900 			 */
3901 			mutex_enter(&np->r_statelock);
3902 			if (blkoff < np->r_size &&
3903 			    blkoff + bsize >= np->r_size) {
3904 				/*
3905 				 * If only a block or less is left in
3906 				 * the file, read all that is remaining.
3907 				 */
3908 				if (np->r_size <= off) {
3909 					/*
3910 					 * Trying to access beyond EOF,
3911 					 * set up to get at least one page.
3912 					 */
3913 					blksize = off + PAGESIZE - blkoff;
3914 				} else
3915 					blksize = np->r_size - blkoff;
3916 			} else if ((off == 0) ||
3917 			    (off != np->r_nextr && !readahead_issued)) {
3918 				blksize = PAGESIZE;
3919 				blkoff = off; /* block = page here */
3920 			} else
3921 				blksize = bsize;
3922 			mutex_exit(&np->r_statelock);
3923 
3924 			pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
3925 			    &io_len, blkoff, blksize, 0);
3926 
3927 			/*
3928 			 * Some other thread has entered the page,
3929 			 * so just use it.
3930 			 */
3931 			if (pp == NULL)
3932 				goto again;
3933 
3934 			/*
3935 			 * Now round the request size up to page boundaries.
3936 			 * This ensures that the entire page will be
3937 			 * initialized to zeroes if EOF is encountered.
3938 			 */
3939 			io_len = ptob(btopr(io_len));
3940 
3941 			bp = pageio_setup(pp, io_len, vp, B_READ);
3942 			ASSERT(bp != NULL);
3943 
3944 			/*
3945 			 * pageio_setup should have set b_addr to 0.  This
3946 			 * is correct since we want to do I/O on a page
3947 			 * boundary.  bp_mapin will use this addr to calculate
3948 			 * an offset, and then set b_addr to the kernel virtual
3949 			 * address it allocated for us.
3950 			 */
3951 			ASSERT(bp->b_un.b_addr == 0);
3952 
3953 			bp->b_edev = 0;
3954 			bp->b_dev = 0;
3955 			bp->b_lblkno = lbtodb(io_off);
3956 			bp->b_file = vp;
3957 			bp->b_offset = (offset_t)off;
3958 			bp_mapin(bp);
3959 
3960 			/*
3961 			 * If doing a write beyond what we believe is EOF,
3962 			 * don't bother trying to read the pages from the
3963 			 * server, we'll just zero the pages here.  We
3964 			 * don't check that the rw flag is S_WRITE here
3965 			 * because some implementations may attempt a
3966 			 * read access to the buffer before copying data.
3967 			 */
3968 			mutex_enter(&np->r_statelock);
3969 			if (io_off >= np->r_size && seg == segkmap) {
3970 				mutex_exit(&np->r_statelock);
3971 				bzero(bp->b_un.b_addr, io_len);
3972 			} else {
3973 				mutex_exit(&np->r_statelock);
3974 				error = smbfs_bio(bp, 0, cr);
3975 			}
3976 
3977 			/*
3978 			 * Unmap the buffer before freeing it.
3979 			 */
3980 			bp_mapout(bp);
3981 			pageio_done(bp);
3982 
3983 			/* Here NFS3 updates all pp->p_fsdata */
3984 
3985 			if (error == SMBFS_EOF) {
3986 				/*
3987 				 * If doing a write system call just return
3988 				 * zeroed pages, else user tried to get pages
3989 				 * beyond EOF, return error.  We don't check
3990 				 * that the rw flag is S_WRITE here because
3991 				 * some implementations may attempt a read
3992 				 * access to the buffer before copying data.
3993 				 */
3994 				if (seg == segkmap)
3995 					error = 0;
3996 				else
3997 					error = EFAULT;
3998 			}
3999 
4000 			if (!readahead_issued && !error) {
4001 				mutex_enter(&np->r_statelock);
4002 				np->r_nextr = io_off + io_len;
4003 				mutex_exit(&np->r_statelock);
4004 			}
4005 		}
4006 	}
4007 
4008 	if (pl == NULL)
4009 		return (error);
4010 
4011 	if (error) {
4012 		if (pp != NULL)
4013 			pvn_read_done(pp, B_ERROR);
4014 		return (error);
4015 	}
4016 
4017 	if (pagefound) {
4018 		se_t se = (rw == S_CREATE ? SE_EXCL : SE_SHARED);
4019 
4020 		/*
4021 		 * Page exists in the cache, acquire the appropriate lock.
4022 		 * If this fails, start all over again.
4023 		 */
4024 		if ((pp = page_lookup(vp, off, se)) == NULL) {
4025 #ifdef DEBUG
4026 			smbfs_lostpage++;
4027 #endif
4028 			goto reread;
4029 		}
4030 		pl[0] = pp;
4031 		pl[1] = NULL;
4032 		return (0);
4033 	}
4034 
4035 	if (pp != NULL)
4036 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4037 
4038 	return (error);
4039 }
4040 
4041 /*
4042  * Here NFS has: nfs3_readahead
4043  * No read-ahead in smbfs yet.
4044  */
4045 
4046 #endif	// _KERNEL
4047 
4048 /*
4049  * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE}
4050  * If len == 0, do from off to EOF.
4051  *
4052  * The normal cases should be len == 0 && off == 0 (entire vp list),
4053  * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
4054  * (from pageout).
4055  *
4056  * Like nfs3_putpage + nfs_putpages
4057  */
4058 /* ARGSUSED */
4059 static int
4060 smbfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr,
4061 	caller_context_t *ct)
4062 {
4063 #ifdef	_KERNEL
4064 	smbnode_t *np;
4065 	smbmntinfo_t *smi;
4066 	page_t *pp;
4067 	u_offset_t eoff;
4068 	u_offset_t io_off;
4069 	size_t io_len;
4070 	int error;
4071 	int rdirty;
4072 	int err;
4073 
4074 	np = VTOSMB(vp);
4075 	smi = VTOSMI(vp);
4076 
4077 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4078 		return (EIO);
4079 
4080 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4081 		return (EIO);
4082 
4083 	if (vp->v_flag & VNOMAP)
4084 		return (ENOSYS);
4085 
4086 	/* Here NFS does rp->r_count (++/--) stuff. */
4087 
4088 	/* Beginning of code from nfs_putpages. */
4089 
4090 	if (!vn_has_cached_data(vp))
4091 		return (0);
4092 
4093 	/*
4094 	 * If ROUTOFSPACE is set, then all writes turn into B_INVAL
4095 	 * writes.  B_FORCE is set to force the VM system to actually
4096 	 * invalidate the pages, even if the i/o failed.  The pages
4097 	 * need to get invalidated because they can't be written out
4098 	 * because there isn't any space left on either the server's
4099 	 * file system or in the user's disk quota.  The B_FREE bit
4100 	 * is cleared to avoid confusion as to whether this is a
4101 	 * request to place the page on the freelist or to destroy
4102 	 * it.
4103 	 */
4104 	if ((np->r_flags & ROUTOFSPACE) ||
4105 	    (vp->v_vfsp->vfs_flag & VFS_UNMOUNTED))
4106 		flags = (flags & ~B_FREE) | B_INVAL | B_FORCE;
4107 
4108 	if (len == 0) {
4109 		/*
4110 		 * If doing a full file synchronous operation, then clear
4111 		 * the RDIRTY bit.  If a page gets dirtied while the flush
4112 		 * is happening, then RDIRTY will get set again.  The
4113 		 * RDIRTY bit must get cleared before the flush so that
4114 		 * we don't lose this information.
4115 		 *
4116 		 * NFS has B_ASYNC vs sync stuff here.
4117 		 */
4118 		if (off == (u_offset_t)0 &&
4119 		    (np->r_flags & RDIRTY)) {
4120 			mutex_enter(&np->r_statelock);
4121 			rdirty = (np->r_flags & RDIRTY);
4122 			np->r_flags &= ~RDIRTY;
4123 			mutex_exit(&np->r_statelock);
4124 		} else
4125 			rdirty = 0;
4126 
4127 		/*
4128 		 * Search the entire vp list for pages >= off, and flush
4129 		 * the dirty pages.
4130 		 */
4131 		error = pvn_vplist_dirty(vp, off, smbfs_putapage,
4132 		    flags, cr);
4133 
4134 		/*
4135 		 * If an error occurred and the file was marked as dirty
4136 		 * before and we aren't forcibly invalidating pages, then
4137 		 * reset the RDIRTY flag.
4138 		 */
4139 		if (error && rdirty &&
4140 		    (flags & (B_INVAL | B_FORCE)) != (B_INVAL | B_FORCE)) {
4141 			mutex_enter(&np->r_statelock);
4142 			np->r_flags |= RDIRTY;
4143 			mutex_exit(&np->r_statelock);
4144 		}
4145 	} else {
4146 		/*
4147 		 * Do a range from [off...off + len) looking for pages
4148 		 * to deal with.
4149 		 */
4150 		error = 0;
4151 		io_len = 1; /* quiet warnings */
4152 		eoff = off + len;
4153 
4154 		for (io_off = off; io_off < eoff; io_off += io_len) {
4155 			mutex_enter(&np->r_statelock);
4156 			if (io_off >= np->r_size) {
4157 				mutex_exit(&np->r_statelock);
4158 				break;
4159 			}
4160 			mutex_exit(&np->r_statelock);
4161 			/*
4162 			 * If we are not invalidating, synchronously
4163 			 * freeing or writing pages use the routine
4164 			 * page_lookup_nowait() to prevent reclaiming
4165 			 * them from the free list.
4166 			 */
4167 			if ((flags & B_INVAL) || !(flags & B_ASYNC)) {
4168 				pp = page_lookup(vp, io_off,
4169 				    (flags & (B_INVAL | B_FREE)) ?
4170 				    SE_EXCL : SE_SHARED);
4171 			} else {
4172 				pp = page_lookup_nowait(vp, io_off,
4173 				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
4174 			}
4175 
4176 			if (pp == NULL || !pvn_getdirty(pp, flags))
4177 				io_len = PAGESIZE;
4178 			else {
4179 				err = smbfs_putapage(vp, pp, &io_off,
4180 				    &io_len, flags, cr);
4181 				if (!error)
4182 					error = err;
4183 				/*
4184 				 * "io_off" and "io_len" are returned as
4185 				 * the range of pages we actually wrote.
4186 				 * This allows us to skip ahead more quickly
4187 				 * since several pages may've been dealt
4188 				 * with by this iteration of the loop.
4189 				 */
4190 			}
4191 		}
4192 	}
4193 
4194 	return (error);
4195 
4196 #else	// _KERNEL
4197 	return (ENOSYS);
4198 #endif	// _KERNEL
4199 }
4200 
4201 #ifdef	_KERNEL
4202 
4203 /*
4204  * Write out a single page, possibly klustering adjacent dirty pages.
4205  *
4206  * Like nfs3_putapage / nfs3_sync_putapage
4207  */
4208 static int
4209 smbfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
4210 	int flags, cred_t *cr)
4211 {
4212 	smbnode_t *np;
4213 	u_offset_t io_off;
4214 	u_offset_t lbn_off;
4215 	u_offset_t lbn;
4216 	size_t io_len;
4217 	uint_t bsize;
4218 	int error;
4219 
4220 	np = VTOSMB(vp);
4221 
4222 	ASSERT(!vn_is_readonly(vp));
4223 
4224 	bsize = MAX(vp->v_vfsp->vfs_bsize, PAGESIZE);
4225 	lbn = pp->p_offset / bsize;
4226 	lbn_off = lbn * bsize;
4227 
4228 	/*
4229 	 * Find a kluster that fits in one block, or in
4230 	 * one page if pages are bigger than blocks.  If
4231 	 * there is less file space allocated than a whole
4232 	 * page, we'll shorten the i/o request below.
4233 	 */
4234 	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, lbn_off,
4235 	    roundup(bsize, PAGESIZE), flags);
4236 
4237 	/*
4238 	 * pvn_write_kluster shouldn't have returned a page with offset
4239 	 * behind the original page we were given.  Verify that.
4240 	 */
4241 	ASSERT((pp->p_offset / bsize) >= lbn);
4242 
4243 	/*
4244 	 * Now pp will have the list of kept dirty pages marked for
4245 	 * write back.  It will also handle invalidation and freeing
4246 	 * of pages that are not dirty.  Check for page length rounding
4247 	 * problems.
4248 	 */
4249 	if (io_off + io_len > lbn_off + bsize) {
4250 		ASSERT((io_off + io_len) - (lbn_off + bsize) < PAGESIZE);
4251 		io_len = lbn_off + bsize - io_off;
4252 	}
4253 	/*
4254 	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
4255 	 * consistent value of r_size. RMODINPROGRESS is set in writerp().
4256 	 * When RMODINPROGRESS is set it indicates that a uiomove() is in
4257 	 * progress and the r_size has not been made consistent with the
4258 	 * new size of the file. When the uiomove() completes the r_size is
4259 	 * updated and the RMODINPROGRESS flag is cleared.
4260 	 *
4261 	 * The RMODINPROGRESS flag makes sure that smbfs_bio() sees a
4262 	 * consistent value of r_size. Without this handshaking, it is
4263 	 * possible that smbfs_bio() picks  up the old value of r_size
4264 	 * before the uiomove() in writerp() completes. This will result
4265 	 * in the write through smbfs_bio() being dropped.
4266 	 *
4267 	 * More precisely, there is a window between the time the uiomove()
4268 	 * completes and the time the r_size is updated. If a VOP_PUTPAGE()
4269 	 * operation intervenes in this window, the page will be picked up,
4270 	 * because it is dirty (it will be unlocked, unless it was
4271 	 * pagecreate'd). When the page is picked up as dirty, the dirty
4272 	 * bit is reset (pvn_getdirty()). In smbfs_write(), r_size is
4273 	 * checked. This will still be the old size. Therefore the page will
4274 	 * not be written out. When segmap_release() calls VOP_PUTPAGE(),
4275 	 * the page will be found to be clean and the write will be dropped.
4276 	 */
4277 	if (np->r_flags & RMODINPROGRESS) {
4278 		mutex_enter(&np->r_statelock);
4279 		if ((np->r_flags & RMODINPROGRESS) &&
4280 		    np->r_modaddr + MAXBSIZE > io_off &&
4281 		    np->r_modaddr < io_off + io_len) {
4282 			page_t *plist;
4283 			/*
4284 			 * A write is in progress for this region of the file.
4285 			 * If we did not detect RMODINPROGRESS here then this
4286 			 * path through smbfs_putapage() would eventually go to
4287 			 * smbfs_bio() and may not write out all of the data
4288 			 * in the pages. We end up losing data. So we decide
4289 			 * to set the modified bit on each page in the page
4290 			 * list and mark the rnode with RDIRTY. This write
4291 			 * will be restarted at some later time.
4292 			 */
4293 			plist = pp;
4294 			while (plist != NULL) {
4295 				pp = plist;
4296 				page_sub(&plist, pp);
4297 				hat_setmod(pp);
4298 				page_io_unlock(pp);
4299 				page_unlock(pp);
4300 			}
4301 			np->r_flags |= RDIRTY;
4302 			mutex_exit(&np->r_statelock);
4303 			if (offp)
4304 				*offp = io_off;
4305 			if (lenp)
4306 				*lenp = io_len;
4307 			return (0);
4308 		}
4309 		mutex_exit(&np->r_statelock);
4310 	}
4311 
4312 	/*
4313 	 * NFS handles (flags & B_ASYNC) here...
4314 	 * (See nfs_async_putapage())
4315 	 *
4316 	 * This code section from: nfs3_sync_putapage()
4317 	 */
4318 
4319 	flags |= B_WRITE;
4320 
4321 	error = smbfs_rdwrlbn(vp, pp, io_off, io_len, flags, cr);
4322 
4323 	if ((error == ENOSPC || error == EDQUOT || error == EFBIG ||
4324 	    error == EACCES) &&
4325 	    (flags & (B_INVAL|B_FORCE)) != (B_INVAL|B_FORCE)) {
4326 		if (!(np->r_flags & ROUTOFSPACE)) {
4327 			mutex_enter(&np->r_statelock);
4328 			np->r_flags |= ROUTOFSPACE;
4329 			mutex_exit(&np->r_statelock);
4330 		}
4331 		flags |= B_ERROR;
4332 		pvn_write_done(pp, flags);
4333 		/*
4334 		 * If this was not an async thread, then try again to
4335 		 * write out the pages, but this time, also destroy
4336 		 * them whether or not the write is successful.  This
4337 		 * will prevent memory from filling up with these
4338 		 * pages and destroying them is the only alternative
4339 		 * if they can't be written out.
4340 		 *
4341 		 * Don't do this if this is an async thread because
4342 		 * when the pages are unlocked in pvn_write_done,
4343 		 * some other thread could have come along, locked
4344 		 * them, and queued for an async thread.  It would be
4345 		 * possible for all of the async threads to be tied
4346 		 * up waiting to lock the pages again and they would
4347 		 * all already be locked and waiting for an async
4348 		 * thread to handle them.  Deadlock.
4349 		 */
4350 		if (!(flags & B_ASYNC)) {
4351 			error = smbfs_putpage(vp, io_off, io_len,
4352 			    B_INVAL | B_FORCE, cr, NULL);
4353 		}
4354 	} else {
4355 		if (error)
4356 			flags |= B_ERROR;
4357 		else if (np->r_flags & ROUTOFSPACE) {
4358 			mutex_enter(&np->r_statelock);
4359 			np->r_flags &= ~ROUTOFSPACE;
4360 			mutex_exit(&np->r_statelock);
4361 		}
4362 		pvn_write_done(pp, flags);
4363 	}
4364 
4365 	/* Now more code from: nfs3_putapage */
4366 
4367 	if (offp)
4368 		*offp = io_off;
4369 	if (lenp)
4370 		*lenp = io_len;
4371 
4372 	return (error);
4373 }
4374 
4375 #endif	// _KERNEL
4376 
4377 
4378 /*
4379  * NFS has this in nfs_client.c (shared by v2,v3,...)
4380  * We have it here so smbfs_putapage can be file scope.
4381  */
4382 void
4383 smbfs_invalidate_pages(vnode_t *vp, u_offset_t off, cred_t *cr)
4384 {
4385 	smbnode_t *np;
4386 
4387 	np = VTOSMB(vp);
4388 
4389 	mutex_enter(&np->r_statelock);
4390 	while (np->r_flags & RTRUNCATE)
4391 		cv_wait(&np->r_cv, &np->r_statelock);
4392 	np->r_flags |= RTRUNCATE;
4393 
4394 	if (off == (u_offset_t)0) {
4395 		np->r_flags &= ~RDIRTY;
4396 		if (!(np->r_flags & RSTALE))
4397 			np->r_error = 0;
4398 	}
4399 	/* Here NFSv3 has np->r_truncaddr = off; */
4400 	mutex_exit(&np->r_statelock);
4401 
4402 #ifdef	_KERNEL
4403 	(void) pvn_vplist_dirty(vp, off, smbfs_putapage,
4404 	    B_INVAL | B_TRUNC, cr);
4405 #endif	// _KERNEL
4406 
4407 	mutex_enter(&np->r_statelock);
4408 	np->r_flags &= ~RTRUNCATE;
4409 	cv_broadcast(&np->r_cv);
4410 	mutex_exit(&np->r_statelock);
4411 }
4412 
4413 #ifdef	_KERNEL
4414 
4415 /* Like nfs3_map */
4416 
4417 /* ARGSUSED */
4418 static int
4419 smbfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4420 	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
4421 	cred_t *cr, caller_context_t *ct)
4422 {
4423 	segvn_crargs_t	vn_a;
4424 	struct vattr	va;
4425 	smbnode_t	*np;
4426 	smbmntinfo_t	*smi;
4427 	int		error;
4428 
4429 	np = VTOSMB(vp);
4430 	smi = VTOSMI(vp);
4431 
4432 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4433 		return (EIO);
4434 
4435 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4436 		return (EIO);
4437 
4438 	if (vp->v_flag & VNOMAP)
4439 		return (ENOSYS);
4440 
4441 	if (off < 0 || off + (ssize_t)len < 0)
4442 		return (ENXIO);
4443 
4444 	if (vp->v_type != VREG)
4445 		return (ENODEV);
4446 
4447 	/*
4448 	 * NFS does close-to-open consistency stuff here.
4449 	 * Just get (possibly cached) attributes.
4450 	 */
4451 	va.va_mask = AT_ALL;
4452 	if ((error = smbfsgetattr(vp, &va, cr)) != 0)
4453 		return (error);
4454 
4455 	/*
4456 	 * Check to see if the vnode is currently marked as not cachable.
4457 	 * This means portions of the file are locked (through VOP_FRLOCK).
4458 	 * In this case the map request must be refused.  We use
4459 	 * rp->r_lkserlock to avoid a race with concurrent lock requests.
4460 	 */
4461 	/*
4462 	 * Atomically increment r_inmap after acquiring r_rwlock. The
4463 	 * idea here is to acquire r_rwlock to block read/write and
4464 	 * not to protect r_inmap. r_inmap will inform smbfs_read/write()
4465 	 * that we are in smbfs_map(). Now, r_rwlock is acquired in order
4466 	 * and we can prevent the deadlock that would have occurred
4467 	 * when smbfs_addmap() would have acquired it out of order.
4468 	 *
4469 	 * Since we are not protecting r_inmap by any lock, we do not
4470 	 * hold any lock when we decrement it. We atomically decrement
4471 	 * r_inmap after we release r_lkserlock.  Note that rwlock is
4472 	 * re-entered as writer in smbfs_addmap (called via as_map).
4473 	 */
4474 
4475 	if (smbfs_rw_enter_sig(&np->r_rwlock, RW_WRITER, SMBINTR(vp)))
4476 		return (EINTR);
4477 	atomic_inc_uint(&np->r_inmap);
4478 	smbfs_rw_exit(&np->r_rwlock);
4479 
4480 	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp))) {
4481 		atomic_dec_uint(&np->r_inmap);
4482 		return (EINTR);
4483 	}
4484 
4485 	if (vp->v_flag & VNOCACHE) {
4486 		error = EAGAIN;
4487 		goto done;
4488 	}
4489 
4490 	/*
4491 	 * Don't allow concurrent locks and mapping if mandatory locking is
4492 	 * enabled.
4493 	 */
4494 	if ((flk_has_remote_locks(vp) || smbfs_lm_has_sleep(vp)) &&
4495 	    MANDLOCK(vp, va.va_mode)) {
4496 		error = EAGAIN;
4497 		goto done;
4498 	}
4499 
4500 	as_rangelock(as);
4501 	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4502 	if (error != 0) {
4503 		as_rangeunlock(as);
4504 		goto done;
4505 	}
4506 
4507 	vn_a.vp = vp;
4508 	vn_a.offset = off;
4509 	vn_a.type = (flags & MAP_TYPE);
4510 	vn_a.prot = (uchar_t)prot;
4511 	vn_a.maxprot = (uchar_t)maxprot;
4512 	vn_a.flags = (flags & ~MAP_TYPE);
4513 	vn_a.cred = cr;
4514 	vn_a.amp = NULL;
4515 	vn_a.szc = 0;
4516 	vn_a.lgrp_mem_policy_flags = 0;
4517 
4518 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
4519 	as_rangeunlock(as);
4520 
4521 done:
4522 	smbfs_rw_exit(&np->r_lkserlock);
4523 	atomic_dec_uint(&np->r_inmap);
4524 	return (error);
4525 }
4526 
4527 /*
4528  * This uses addmap/delmap functions to hold the SMB FID open as long as
4529  * there are pages mapped in this as/seg.  Increment the FID refs. when
4530  * the maping count goes from zero to non-zero, and release the FID ref
4531  * when the maping count goes from non-zero to zero.
4532  */
4533 
4534 /* ARGSUSED */
4535 static int
4536 smbfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4537 	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
4538 	cred_t *cr, caller_context_t *ct)
4539 {
4540 	smbnode_t *np = VTOSMB(vp);
4541 	boolean_t inc_fidrefs = B_FALSE;
4542 
4543 	/*
4544 	 * When r_mapcnt goes from zero to non-zero,
4545 	 * increment n_fidrefs
4546 	 */
4547 	mutex_enter(&np->r_statelock);
4548 	if (np->r_mapcnt == 0)
4549 		inc_fidrefs = B_TRUE;
4550 	np->r_mapcnt += btopr(len);
4551 	mutex_exit(&np->r_statelock);
4552 
4553 	if (inc_fidrefs) {
4554 		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
4555 		np->n_fidrefs++;
4556 		smbfs_rw_exit(&np->r_lkserlock);
4557 	}
4558 
4559 	return (0);
4560 }
4561 
4562 /*
4563  * Args passed to smbfs_delmap_async
4564  */
4565 typedef struct smbfs_delmap_args {
4566 	taskq_ent_t		dm_tqent;
4567 	cred_t			*dm_cr;
4568 	vnode_t			*dm_vp;
4569 	offset_t		dm_off;
4570 	caddr_t			dm_addr;
4571 	size_t			dm_len;
4572 	uint_t			dm_prot;
4573 	uint_t			dm_maxprot;
4574 	uint_t			dm_flags;
4575 	boolean_t		dm_rele_fid;
4576 } smbfs_delmap_args_t;
4577 
4578 /*
4579  * Using delmap not only to release the SMB FID (as described above)
4580  * but to flush dirty pages as needed.  Both of those do the actual
4581  * work in an async taskq job to avoid interfering with locks held
4582  * in the VM layer when this is called.
4583  */
4584 
4585 /* ARGSUSED */
4586 static int
4587 smbfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4588 	size_t len, uint_t prot, uint_t maxprot, uint_t flags,
4589 	cred_t *cr, caller_context_t *ct)
4590 {
4591 	smbnode_t		*np = VTOSMB(vp);
4592 	smbmntinfo_t		*smi = VTOSMI(vp);
4593 	smbfs_delmap_args_t	*dmapp;
4594 
4595 	dmapp = kmem_zalloc(sizeof (*dmapp), KM_SLEEP);
4596 
4597 	/*
4598 	 * The VM layer may segvn_free the seg holding this vnode
4599 	 * before our callback has a chance run, so take a hold on
4600 	 * the vnode here and release it in the callback.
4601 	 * (same for the cred)
4602 	 */
4603 	crhold(cr);
4604 	VN_HOLD(vp);
4605 
4606 	dmapp->dm_vp = vp;
4607 	dmapp->dm_cr = cr;
4608 	dmapp->dm_off = off;
4609 	dmapp->dm_addr = addr;
4610 	dmapp->dm_len = len;
4611 	dmapp->dm_prot = prot;
4612 	dmapp->dm_maxprot = maxprot;
4613 	dmapp->dm_flags = flags;
4614 	dmapp->dm_rele_fid = B_FALSE;
4615 
4616 	/*
4617 	 * Go ahead and decrement r_mapcount now, which is
4618 	 * the primary purpose of this function.
4619 	 *
4620 	 * When r_mapcnt goes to zero, we need to call
4621 	 * smbfs_rele_fid, but can't do that here, so
4622 	 * set a flag telling the async task to do it.
4623 	 */
4624 	mutex_enter(&np->r_statelock);
4625 	np->r_mapcnt -= btopr(len);
4626 	ASSERT(np->r_mapcnt >= 0);
4627 	if (np->r_mapcnt == 0)
4628 		dmapp->dm_rele_fid = B_TRUE;
4629 	mutex_exit(&np->r_statelock);
4630 
4631 	taskq_dispatch_ent(smi->smi_taskq, smbfs_delmap_async, dmapp, 0,
4632 	    &dmapp->dm_tqent);
4633 
4634 	return (0);
4635 }
4636 
4637 /*
4638  * Remove some pages from an mmap'd vnode.  Flush any
4639  * dirty pages in the unmapped range.
4640  */
4641 /* ARGSUSED */
4642 static void
4643 smbfs_delmap_async(void *varg)
4644 {
4645 	smbfs_delmap_args_t	*dmapp = varg;
4646 	cred_t			*cr;
4647 	vnode_t			*vp;
4648 	smbnode_t		*np;
4649 	smbmntinfo_t		*smi;
4650 
4651 	cr = dmapp->dm_cr;
4652 	vp = dmapp->dm_vp;
4653 	np = VTOSMB(vp);
4654 	smi = VTOSMI(vp);
4655 
4656 	/* Decremented r_mapcnt in smbfs_delmap */
4657 
4658 	/*
4659 	 * Initiate a page flush and potential commit if there are
4660 	 * pages, the file system was not mounted readonly, the segment
4661 	 * was mapped shared, and the pages themselves were writeable.
4662 	 *
4663 	 * mark RDIRTY here, will be used to check if a file is dirty when
4664 	 * unmount smbfs
4665 	 */
4666 	if (vn_has_cached_data(vp) && !vn_is_readonly(vp) &&
4667 	    dmapp->dm_flags == MAP_SHARED &&
4668 	    (dmapp->dm_maxprot & PROT_WRITE) != 0) {
4669 		mutex_enter(&np->r_statelock);
4670 		np->r_flags |= RDIRTY;
4671 		mutex_exit(&np->r_statelock);
4672 
4673 		/*
4674 		 * Need to finish the putpage before we
4675 		 * close the OtW FID needed for I/O.
4676 		 */
4677 		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len, 0,
4678 		    dmapp->dm_cr, NULL);
4679 	}
4680 
4681 	if ((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO))
4682 		(void) smbfs_putpage(vp, dmapp->dm_off, dmapp->dm_len,
4683 		    B_INVAL, dmapp->dm_cr, NULL);
4684 
4685 	/*
4686 	 * If r_mapcnt went to zero, drop our FID ref now.
4687 	 * On the last fidref, this does an OtW close.
4688 	 */
4689 	if (dmapp->dm_rele_fid) {
4690 		struct smb_cred scred;
4691 
4692 		(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
4693 		smb_credinit(&scred, dmapp->dm_cr);
4694 
4695 		smbfs_rele_fid(np, &scred);
4696 
4697 		smb_credrele(&scred);
4698 		smbfs_rw_exit(&np->r_lkserlock);
4699 	}
4700 
4701 	/* Release holds taken in smbfs_delmap */
4702 	VN_RELE(vp);
4703 	crfree(cr);
4704 
4705 	kmem_free(dmapp, sizeof (*dmapp));
4706 }
4707 
4708 /* No smbfs_pageio() or smbfs_dispose() ops. */
4709 
4710 #endif	// _KERNEL
4711 
4712 /* misc. ******************************************************** */
4713 
4714 
4715 /*
4716  * XXX
4717  * This op may need to support PSARC 2007/440, nbmand changes for CIFS Service.
4718  */
4719 static int
4720 smbfs_frlock(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4721 	offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
4722 	caller_context_t *ct)
4723 {
4724 	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
4725 		return (EIO);
4726 
4727 	if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
4728 		return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4729 	else
4730 		return (ENOSYS);
4731 }
4732 
4733 /*
4734  * Free storage space associated with the specified vnode.  The portion
4735  * to be freed is specified by bfp->l_start and bfp->l_len (already
4736  * normalized to a "whence" of 0).
4737  *
4738  * Called by fcntl(fd, F_FREESP, lkp) for libc:ftruncate, etc.
4739  */
4740 /* ARGSUSED */
4741 static int
4742 smbfs_space(vnode_t *vp, int cmd, struct flock64 *bfp, int flag,
4743 	offset_t offset, cred_t *cr, caller_context_t *ct)
4744 {
4745 	int		error;
4746 	smbmntinfo_t	*smi;
4747 
4748 	smi = VTOSMI(vp);
4749 
4750 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4751 		return (EIO);
4752 
4753 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4754 		return (EIO);
4755 
4756 	/* Caller (fcntl) has checked v_type */
4757 	ASSERT(vp->v_type == VREG);
4758 	if (cmd != F_FREESP)
4759 		return (EINVAL);
4760 
4761 	/*
4762 	 * Like NFS3, no 32-bit offset checks here.
4763 	 * Our SMB layer takes care to return EFBIG
4764 	 * when it has to fallback to a 32-bit call.
4765 	 */
4766 
4767 	error = convoff(vp, bfp, 0, offset);
4768 	if (!error) {
4769 		ASSERT(bfp->l_start >= 0);
4770 		if (bfp->l_len == 0) {
4771 			struct vattr va;
4772 
4773 			/*
4774 			 * ftruncate should not change the ctime and
4775 			 * mtime if we truncate the file to its
4776 			 * previous size.
4777 			 */
4778 			va.va_mask = AT_SIZE;
4779 			error = smbfsgetattr(vp, &va, cr);
4780 			if (error || va.va_size == bfp->l_start)
4781 				return (error);
4782 			va.va_mask = AT_SIZE;
4783 			va.va_size = bfp->l_start;
4784 			error = smbfssetattr(vp, &va, 0, cr);
4785 			/* SMBFS_VNEVENT... */
4786 		} else
4787 			error = EINVAL;
4788 	}
4789 
4790 	return (error);
4791 }
4792 
4793 
4794 /* ARGSUSED */
4795 static int
4796 smbfs_realvp(vnode_t *vp, vnode_t **vpp, caller_context_t *ct)
4797 {
4798 
4799 	return (ENOSYS);
4800 }
4801 
4802 
4803 /* ARGSUSED */
4804 static int
4805 smbfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
4806 	caller_context_t *ct)
4807 {
4808 	vfs_t *vfs;
4809 	smbmntinfo_t *smi;
4810 	struct smb_share *ssp;
4811 
4812 	vfs = vp->v_vfsp;
4813 	smi = VFTOSMI(vfs);
4814 
4815 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4816 		return (EIO);
4817 
4818 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
4819 		return (EIO);
4820 
4821 	switch (cmd) {
4822 	case _PC_FILESIZEBITS:
4823 		ssp = smi->smi_share;
4824 		if (SSTOVC(ssp)->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES)
4825 			*valp = 64;
4826 		else
4827 			*valp = 32;
4828 		break;
4829 
4830 	case _PC_LINK_MAX:
4831 		/* We only ever report one link to an object */
4832 		*valp = 1;
4833 		break;
4834 
4835 	case _PC_ACL_ENABLED:
4836 		/*
4837 		 * Always indicate that ACLs are enabled and
4838 		 * that we support ACE_T format, otherwise
4839 		 * libsec will ask for ACLENT_T format data
4840 		 * which we don't support.
4841 		 */
4842 		*valp = _ACL_ACE_ENABLED;
4843 		break;
4844 
4845 	case _PC_SYMLINK_MAX:	/* No symlinks until we do Unix extensions */
4846 		*valp = 0;
4847 		break;
4848 
4849 	case _PC_XATTR_EXISTS:
4850 		if (vfs->vfs_flag & VFS_XATTR) {
4851 			*valp = smbfs_xa_exists(vp, cr);
4852 			break;
4853 		}
4854 		return (EINVAL);
4855 
4856 	case _PC_SATTR_ENABLED:
4857 	case _PC_SATTR_EXISTS:
4858 		*valp = 1;
4859 		break;
4860 
4861 	case _PC_TIMESTAMP_RESOLUTION:
4862 		/*
4863 		 * Windows times are tenths of microseconds
4864 		 * (multiples of 100 nanoseconds).
4865 		 */
4866 		*valp = 100L;
4867 		break;
4868 
4869 	default:
4870 		return (fs_pathconf(vp, cmd, valp, cr, ct));
4871 	}
4872 	return (0);
4873 }
4874 
4875 /* ARGSUSED */
4876 static int
4877 smbfs_getsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
4878 	caller_context_t *ct)
4879 {
4880 	vfs_t *vfsp;
4881 	smbmntinfo_t *smi;
4882 	int	error;
4883 	uint_t	mask;
4884 
4885 	vfsp = vp->v_vfsp;
4886 	smi = VFTOSMI(vfsp);
4887 
4888 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4889 		return (EIO);
4890 
4891 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
4892 		return (EIO);
4893 
4894 	/*
4895 	 * Our _pathconf indicates _ACL_ACE_ENABLED,
4896 	 * so we should only see VSA_ACE, etc here.
4897 	 * Note: vn_create asks for VSA_DFACLCNT,
4898 	 * and it expects ENOSYS and empty data.
4899 	 */
4900 	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT |
4901 	    VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
4902 	if (mask == 0)
4903 		return (ENOSYS);
4904 
4905 	if (smi->smi_flags & SMI_ACL)
4906 		error = smbfs_acl_getvsa(vp, vsa, flag, cr);
4907 	else
4908 		error = ENOSYS;
4909 
4910 	if (error == ENOSYS)
4911 		error = fs_fab_acl(vp, vsa, flag, cr, ct);
4912 
4913 	return (error);
4914 }
4915 
4916 /* ARGSUSED */
4917 static int
4918 smbfs_setsecattr(vnode_t *vp, vsecattr_t *vsa, int flag, cred_t *cr,
4919 	caller_context_t *ct)
4920 {
4921 	vfs_t *vfsp;
4922 	smbmntinfo_t *smi;
4923 	int	error;
4924 	uint_t	mask;
4925 
4926 	vfsp = vp->v_vfsp;
4927 	smi = VFTOSMI(vfsp);
4928 
4929 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
4930 		return (EIO);
4931 
4932 	if (smi->smi_flags & SMI_DEAD || vfsp->vfs_flag & VFS_UNMOUNTED)
4933 		return (EIO);
4934 
4935 	/*
4936 	 * Our _pathconf indicates _ACL_ACE_ENABLED,
4937 	 * so we should only see VSA_ACE, etc here.
4938 	 */
4939 	mask = vsa->vsa_mask & (VSA_ACE | VSA_ACECNT);
4940 	if (mask == 0)
4941 		return (ENOSYS);
4942 
4943 	if (vfsp->vfs_flag & VFS_RDONLY)
4944 		return (EROFS);
4945 
4946 	/*
4947 	 * Allow only the mount owner to do this.
4948 	 * See comments at smbfs_access_rwx.
4949 	 */
4950 	error = secpolicy_vnode_setdac(cr, smi->smi_uid);
4951 	if (error != 0)
4952 		return (error);
4953 
4954 	if (smi->smi_flags & SMI_ACL)
4955 		error = smbfs_acl_setvsa(vp, vsa, flag, cr);
4956 	else
4957 		error = ENOSYS;
4958 
4959 	return (error);
4960 }
4961 
4962 
4963 /*
4964  * XXX
4965  * This op should eventually support PSARC 2007/268.
4966  */
4967 static int
4968 smbfs_shrlock(vnode_t *vp, int cmd, struct shrlock *shr, int flag, cred_t *cr,
4969 	caller_context_t *ct)
4970 {
4971 	if (curproc->p_zone != VTOSMI(vp)->smi_zone_ref.zref_zone)
4972 		return (EIO);
4973 
4974 	if (VTOSMI(vp)->smi_flags & SMI_LLOCK)
4975 		return (fs_shrlock(vp, cmd, shr, flag, cr, ct));
4976 	else
4977 		return (ENOSYS);
4978 }
4979 
4980 
4981 /*
4982  * Most unimplemented ops will return ENOSYS because of fs_nosys().
4983  * The only ops where that won't work are ACCESS (due to open(2)
4984  * failures) and ... (anything else left?)
4985  */
4986 const fs_operation_def_t smbfs_vnodeops_template[] = {
4987 	VOPNAME_OPEN,		{ .vop_open = smbfs_open },
4988 	VOPNAME_CLOSE,		{ .vop_close = smbfs_close },
4989 	VOPNAME_READ,		{ .vop_read = smbfs_read },
4990 	VOPNAME_WRITE,		{ .vop_write = smbfs_write },
4991 	VOPNAME_IOCTL,		{ .vop_ioctl = smbfs_ioctl },
4992 	VOPNAME_GETATTR,	{ .vop_getattr = smbfs_getattr },
4993 	VOPNAME_SETATTR,	{ .vop_setattr = smbfs_setattr },
4994 	VOPNAME_ACCESS,		{ .vop_access = smbfs_access },
4995 	VOPNAME_LOOKUP,		{ .vop_lookup = smbfs_lookup },
4996 	VOPNAME_CREATE,		{ .vop_create = smbfs_create },
4997 	VOPNAME_REMOVE,		{ .vop_remove = smbfs_remove },
4998 	VOPNAME_LINK,		{ .vop_link = smbfs_link },
4999 	VOPNAME_RENAME,		{ .vop_rename = smbfs_rename },
5000 	VOPNAME_MKDIR,		{ .vop_mkdir = smbfs_mkdir },
5001 	VOPNAME_RMDIR,		{ .vop_rmdir = smbfs_rmdir },
5002 	VOPNAME_READDIR,	{ .vop_readdir = smbfs_readdir },
5003 	VOPNAME_SYMLINK,	{ .vop_symlink = smbfs_symlink },
5004 	VOPNAME_READLINK,	{ .vop_readlink = smbfs_readlink },
5005 	VOPNAME_FSYNC,		{ .vop_fsync = smbfs_fsync },
5006 	VOPNAME_INACTIVE,	{ .vop_inactive = smbfs_inactive },
5007 	VOPNAME_FID,		{ .vop_fid = smbfs_fid },
5008 	VOPNAME_RWLOCK,		{ .vop_rwlock = smbfs_rwlock },
5009 	VOPNAME_RWUNLOCK,	{ .vop_rwunlock = smbfs_rwunlock },
5010 	VOPNAME_SEEK,		{ .vop_seek = smbfs_seek },
5011 	VOPNAME_FRLOCK,		{ .vop_frlock = smbfs_frlock },
5012 	VOPNAME_SPACE,		{ .vop_space = smbfs_space },
5013 	VOPNAME_REALVP,		{ .vop_realvp = smbfs_realvp },
5014 #ifdef	_KERNEL
5015 	VOPNAME_GETPAGE,	{ .vop_getpage = smbfs_getpage },
5016 	VOPNAME_PUTPAGE,	{ .vop_putpage = smbfs_putpage },
5017 	VOPNAME_MAP,		{ .vop_map = smbfs_map },
5018 	VOPNAME_ADDMAP,		{ .vop_addmap = smbfs_addmap },
5019 	VOPNAME_DELMAP,		{ .vop_delmap = smbfs_delmap },
5020 #endif	// _KERNEL
5021 	VOPNAME_PATHCONF,	{ .vop_pathconf = smbfs_pathconf },
5022 	VOPNAME_SETSECATTR,	{ .vop_setsecattr = smbfs_setsecattr },
5023 	VOPNAME_GETSECATTR,	{ .vop_getsecattr = smbfs_getsecattr },
5024 	VOPNAME_SHRLOCK,	{ .vop_shrlock = smbfs_shrlock },
5025 #ifdef	SMBFS_VNEVENT
5026 	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
5027 #endif
5028 	{ NULL, NULL }
5029 };
5030