14bff34e3Sthurlow /*
24bff34e3Sthurlow  * Copyright (c) 2000-2001 Boris Popov
34bff34e3Sthurlow  * All rights reserved.
44bff34e3Sthurlow  *
54bff34e3Sthurlow  * Redistribution and use in source and binary forms, with or without
64bff34e3Sthurlow  * modification, are permitted provided that the following conditions
74bff34e3Sthurlow  * are met:
84bff34e3Sthurlow  * 1. Redistributions of source code must retain the above copyright
94bff34e3Sthurlow  *    notice, this list of conditions and the following disclaimer.
104bff34e3Sthurlow  * 2. Redistributions in binary form must reproduce the above copyright
114bff34e3Sthurlow  *    notice, this list of conditions and the following disclaimer in the
124bff34e3Sthurlow  *    documentation and/or other materials provided with the distribution.
134bff34e3Sthurlow  * 3. All advertising materials mentioning features or use of this software
144bff34e3Sthurlow  *    must display the following acknowledgement:
154bff34e3Sthurlow  *    This product includes software developed by Boris Popov.
164bff34e3Sthurlow  * 4. Neither the name of the author nor the names of any co-contributors
174bff34e3Sthurlow  *    may be used to endorse or promote products derived from this software
184bff34e3Sthurlow  *    without specific prior written permission.
194bff34e3Sthurlow  *
204bff34e3Sthurlow  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
214bff34e3Sthurlow  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
224bff34e3Sthurlow  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
234bff34e3Sthurlow  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
244bff34e3Sthurlow  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
254bff34e3Sthurlow  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
264bff34e3Sthurlow  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
274bff34e3Sthurlow  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
284bff34e3Sthurlow  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
294bff34e3Sthurlow  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
304bff34e3Sthurlow  * SUCH DAMAGE.
314bff34e3Sthurlow  *
324bff34e3Sthurlow  * $Id: smbfs_vnops.c,v 1.128.36.1 2005/05/27 02:35:28 lindak Exp $
334bff34e3Sthurlow  */
344bff34e3Sthurlow 
354bff34e3Sthurlow /*
36134a1f4eSCasper H.S. Dik  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37*168091e5SGordon Ross  * Copyright 2021 Tintri by DDN, Inc.  All rights reserved.
384bff34e3Sthurlow  */
394bff34e3Sthurlow 
405f4fc069Sjilinxpd /*
415f4fc069Sjilinxpd  * Vnode operations
425f4fc069Sjilinxpd  *
435f4fc069Sjilinxpd  * This file is similar to nfs3_vnops.c
445f4fc069Sjilinxpd  */
455f4fc069Sjilinxpd 
465f4fc069Sjilinxpd #include <sys/param.h>
474bff34e3Sthurlow #include <sys/systm.h>
484bff34e3Sthurlow #include <sys/cred.h>
494bff34e3Sthurlow #include <sys/vnode.h>
504bff34e3Sthurlow #include <sys/vfs.h>
517568150aSgwr #include <sys/filio.h>
524bff34e3Sthurlow #include <sys/uio.h>
534bff34e3Sthurlow #include <sys/dirent.h>
544bff34e3Sthurlow #include <sys/errno.h>
55613a2f6bSGordon Ross #include <sys/sunddi.h>
564bff34e3Sthurlow #include <sys/sysmacros.h>
574bff34e3Sthurlow #include <sys/kmem.h>
584bff34e3Sthurlow #include <sys/cmn_err.h>
594bff34e3Sthurlow #include <sys/vfs_opreg.h>
604bff34e3Sthurlow #include <sys/policy.h>
615f4fc069Sjilinxpd #include <sys/sdt.h>
624e72ade1SGordon Ross #include <sys/taskq_impl.h>
635f4fc069Sjilinxpd #include <sys/zone.h>
645f4fc069Sjilinxpd 
658329232eSGordon Ross #ifdef	_KERNEL
668329232eSGordon Ross #include <sys/vmsystm.h>	// for desfree
675f4fc069Sjilinxpd #include <vm/hat.h>
685f4fc069Sjilinxpd #include <vm/as.h>
695f4fc069Sjilinxpd #include <vm/page.h>
705f4fc069Sjilinxpd #include <vm/pvn.h>
715f4fc069Sjilinxpd #include <vm/seg.h>
725f4fc069Sjilinxpd #include <vm/seg_map.h>
735f4fc069Sjilinxpd #include <vm/seg_kpm.h>
745f4fc069Sjilinxpd #include <vm/seg_vn.h>
758329232eSGordon Ross #endif	// _KERNEL
764bff34e3Sthurlow 
774bff34e3Sthurlow #include <netsmb/smb_osdep.h>
784bff34e3Sthurlow #include <netsmb/smb.h>
794bff34e3Sthurlow #include <netsmb/smb_conn.h>
804bff34e3Sthurlow #include <netsmb/smb_subr.h>
814bff34e3Sthurlow 
824bff34e3Sthurlow #include <smbfs/smbfs.h>
834bff34e3Sthurlow #include <smbfs/smbfs_node.h>
844bff34e3Sthurlow #include <smbfs/smbfs_subr.h>
854bff34e3Sthurlow 
867568150aSgwr #include <sys/fs/smbfs_ioctl.h>
874bff34e3Sthurlow #include <fs/fs_subr.h>
884bff34e3Sthurlow 
898329232eSGordon Ross #ifndef	MAXOFF32_T
908329232eSGordon Ross #define	MAXOFF32_T	0x7fffffff
918329232eSGordon Ross #endif
928329232eSGordon Ross 
/*
 * We assign directory offsets like the NFS client, where the
 * offset increments by _one_ after each directory entry.
 * Further, the entries "." and ".." are always at offsets
 * zero and one (respectively) and the "real" entries from
 * the server appear at offsets starting with two.  This
 * macro is used to initialize the n_dirofs field after
 * setting n_dirseq with a _findopen call.
 * (See smbfs_open and smbfs_readvdir for the users.)
 */
#define	FIRST_DIROFS	2
1035ecede33SGordon Ross 
/*
 * These characters are illegal in NTFS file names.
 * ref: http://support.microsoft.com/kb/147438
 *
 * Careful!  The check in the XATTR case skips the
 * first character to allow colon in XATTR names.
 * (That is why the ordering of this table matters:
 * code doing that check indexes past illegal_chars[0].)
 */
static const char illegal_chars[] = {
	':',	/* colon - keep this first! */
	'\\',	/* back slash */
	'/',	/* slash */
	'*',	/* asterisk */
	'?',	/* question mark */
	'"',	/* double quote */
	'<',	/* less than sign */
	'>',	/* greater than sign */
	'|',	/* vertical bar */
	0	/* terminator for string-style scans */
};
1234bff34e3Sthurlow 
/*
 * Turning this on causes nodes to be created in the cache
 * during directory listings, normally avoiding a second
 * over-the-wire (OtW) attribute fetch just after a readdir.
 * Tunable: may be cleared (e.g. via mdb) for debugging.
 */
int smbfs_fastlookup = 1;

/* Filled in by smbfs_create_vnodeops() at module init. */
struct vnodeops *smbfs_vnodeops = NULL;
1325f4fc069Sjilinxpd 
1334bff34e3Sthurlow /* local static function defines */
1344bff34e3Sthurlow 
13502d09e03SGordon Ross static int	smbfslookup_cache(vnode_t *, char *, int, vnode_t **,
13602d09e03SGordon Ross 			cred_t *);
1374bff34e3Sthurlow static int	smbfslookup(vnode_t *dvp, char *nm, vnode_t **vpp, cred_t *cr,
13802d09e03SGordon Ross 			int cache_ok, caller_context_t *);
139ff1e230cSjilinxpd static int	smbfsremove(vnode_t *dvp, vnode_t *vp, struct smb_cred *scred,
140ff1e230cSjilinxpd 			int flags);
141ff1e230cSjilinxpd static int	smbfsrename(vnode_t *odvp, vnode_t *ovp, vnode_t *ndvp,
142ff1e230cSjilinxpd 			char *nnm, struct smb_cred *scred, int flags);
1434bff34e3Sthurlow static int	smbfssetattr(vnode_t *, struct vattr *, int, cred_t *);
1444bff34e3Sthurlow static int	smbfs_accessx(void *, int, cred_t *);
1454bff34e3Sthurlow static int	smbfs_readvdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp,
1464bff34e3Sthurlow 			caller_context_t *);
147adee6784SGordon Ross static int	smbfsflush(smbnode_t *, struct smb_cred *);
14842d15982SGordon Ross static void	smbfs_rele_fid(smbnode_t *, struct smb_cred *);
14928162916SGordon Ross static uint32_t xvattr_to_dosattr(smbnode_t *, struct vattr *);
15042d15982SGordon Ross 
1515f4fc069Sjilinxpd static int	smbfs_fsync(vnode_t *, int, cred_t *, caller_context_t *);
1528329232eSGordon Ross 
1535f4fc069Sjilinxpd static int	smbfs_putpage(vnode_t *, offset_t, size_t, int, cred_t *,
1545f4fc069Sjilinxpd 			caller_context_t *);
1558329232eSGordon Ross #ifdef	_KERNEL
1565f4fc069Sjilinxpd static int	smbfs_getapage(vnode_t *, u_offset_t, size_t, uint_t *,
1575f4fc069Sjilinxpd 			page_t *[], size_t, struct seg *, caddr_t,
1585f4fc069Sjilinxpd 			enum seg_rw, cred_t *);
1595f4fc069Sjilinxpd static int	smbfs_putapage(vnode_t *, page_t *, u_offset_t *, size_t *,
1605f4fc069Sjilinxpd 			int, cred_t *);
1614e72ade1SGordon Ross static void	smbfs_delmap_async(void *);
1625f4fc069Sjilinxpd 
1638329232eSGordon Ross static int	smbfs_rdwrlbn(vnode_t *, page_t *, u_offset_t, size_t, int,
1648329232eSGordon Ross 			cred_t *);
1658329232eSGordon Ross static int	smbfs_bio(struct buf *, int, cred_t *);
1668329232eSGordon Ross static int	smbfs_writenp(smbnode_t *np, caddr_t base, int tcount,
1678329232eSGordon Ross 			struct uio *uiop, int pgcreated);
1688329232eSGordon Ross #endif	// _KERNEL
1698329232eSGordon Ross 
/*
 * Error flags used to pass information about certain special errors
 * which need to be handled specially.  SMBFS_EOF is an in-band,
 * non-errno sentinel (negative so it cannot collide with real errno
 * values) used internally to signal end-of-file conditions.
 */
#define	SMBFS_EOF			-98

/*
 * When implementing OtW locks, make this a real function.
 * For now, no over-the-wire locks can block, so always zero.
 */
#define	smbfs_lm_has_sleep(vp) 0
1785f4fc069Sjilinxpd 
/*
 * These are the vnode ops routines which implement the vnode interface to
 * the networked file system.  These routines just take their parameters,
 * make them look networkish by putting the right info into interface structs,
 * and then call the appropriate remote routine(s) to do the work.
 *
 * Note on directory name lookup caching:  If we detect a stale fhandle,
 * we purge the directory cache relative to that vnode.  This way, the
 * user won't get burned by the cache repeatedly.  See <smbfs/smbnode.h> for
 * more details on smbnode locking.
 */
1914bff34e3Sthurlow 
1924bff34e3Sthurlow /*
1934bff34e3Sthurlow  * XXX
1944bff34e3Sthurlow  * When new and relevant functionality is enabled, we should be
1954bff34e3Sthurlow  * calling vfs_set_feature() to inform callers that pieces of
1969660e5cbSJanice Chang  * functionality are available, per PSARC 2007/227.
1974bff34e3Sthurlow  */
/*
 * smbfs_open: VOP_OPEN for smbfs.
 *
 * For directories, obtains (or reuses) a server-side find context
 * (n_dirseq) used later by smbfs_readvdir.  For regular files,
 * reuses the existing SMB file handle (n_fid) when its access
 * rights cover this open, otherwise does an over-the-wire open
 * with upgraded rights and replaces n_fid.
 *
 * Returns 0 or an errno (EIO for wrong zone / dead mount,
 * EACCES for unsupported vnode type or type mismatch, EINTR
 * if the serialization lock wait was interrupted).
 */
/* ARGSUSED */
static int
smbfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	smbnode_t	*np;
	vnode_t		*vp;
	smbfattr_t	fa;
	smb_fh_t	*fid = NULL;
	smb_fh_t	*oldfid;
	uint32_t	rights;
	struct smb_cred scred;
	smbmntinfo_t	*smi;
	smb_share_t	*ssp;
	cred_t		*oldcr;
	int		error = 0;

	vp = *vpp;
	np = VTOSMB(vp);
	smi = VTOSMI(vp);
	ssp = smi->smi_share;

	/* Cross-zone access is never allowed over the wire. */
	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	if (vp->v_type != VREG && vp->v_type != VDIR) { /* XXX VLNK? */
		SMBVDEBUG("open eacces vtype=%d\n", vp->v_type);
		return (EACCES);
	}

	/*
	 * Get exclusive access to n_fid and related stuff.
	 * No returns after this until out.
	 */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	/*
	 * Keep track of the vnode type at first open.
	 * It may change later, and we need close to do
	 * cleanup for the type we opened.  Also deny
	 * open of new types until old type is closed.
	 */
	if (np->n_ovtype == VNON) {
		ASSERT(np->n_dirrefs == 0);
		ASSERT(np->n_fidrefs == 0);
	} else if (np->n_ovtype != vp->v_type) {
		SMBVDEBUG("open n_ovtype=%d v_type=%d\n",
		    np->n_ovtype, vp->v_type);
		error = EACCES;
		goto out;
	}

	/*
	 * Directory open.  See smbfs_readvdir()
	 */
	if (vp->v_type == VDIR) {
		if (np->n_dirseq == NULL) {
			/* first open */
			error = smbfs_smb_findopen(np, "*", 1,
			    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
			    &scred, &np->n_dirseq);
			if (error != 0)
				goto out;
		}
		np->n_dirofs = FIRST_DIROFS;
		np->n_dirrefs++;
		goto have_fid;
	}

	/*
	 * If caller specified O_TRUNC/FTRUNC, then be sure to set
	 * FWRITE (to drive successful setattr(size=0) after open)
	 */
	if (flag & FTRUNC)
		flag |= FWRITE;

	/*
	 * If we already have it open, and the FID is still valid,
	 * check whether the rights are sufficient for FID reuse.
	 * (fh_vcgenid changes when the VC reconnects, which
	 * invalidates any handles from the previous session.)
	 */
	if (np->n_fidrefs > 0 &&
	    (fid = np->n_fid) != NULL &&
	    fid->fh_vcgenid == ssp->ss_vcgenid) {
		int upgrade = 0;

		if ((flag & FWRITE) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_WRITE_DATA))
			upgrade = 1;
		if ((flag & FREAD) &&
		    !(fid->fh_rights & SA_RIGHT_FILE_READ_DATA))
			upgrade = 1;
		if (!upgrade) {
			/*
			 *  the existing open is good enough
			 */
			np->n_fidrefs++;
			goto have_fid;
		}
		fid = NULL;
	}
	/* Carry forward old rights (if any) into the upgraded open. */
	rights = (fid != NULL) ? fid->fh_rights : 0;

	/*
	 * we always ask for READ_CONTROL so we can always get the
	 * owner/group IDs to satisfy a stat.  Ditto attributes.
	 */
	rights |= (STD_RIGHT_READ_CONTROL_ACCESS |
	    SA_RIGHT_FILE_READ_ATTRIBUTES);
	if ((flag & FREAD))
		rights |= SA_RIGHT_FILE_READ_DATA;
	if ((flag & FWRITE))
		rights |= SA_RIGHT_FILE_WRITE_DATA |
		    SA_RIGHT_FILE_APPEND_DATA |
		    SA_RIGHT_FILE_WRITE_ATTRIBUTES;

	bzero(&fa, sizeof (fa));
	error = smbfs_smb_open(np,
	    NULL, 0, 0, /* name nmlen xattr */
	    rights, &scred,
	    &fid, &fa);
	if (error)
		goto out;
	/* The open returned fresh attributes; cache them. */
	smbfs_attrcache_fa(vp, &fa);

	/*
	 * We have a new FID and access rights.
	 * Swap it in and release the old handle (if any).
	 */
	VERIFY(fid != NULL);
	oldfid = np->n_fid;
	np->n_fid = fid;
	np->n_fidrefs++;
	if (oldfid != NULL)
		smb_fh_rele(oldfid);

	/*
	 * This thread did the open.
	 * Save our credentials too.
	 */
	mutex_enter(&np->r_statelock);
	oldcr = np->r_cred;
	np->r_cred = cr;
	crhold(cr);
	if (oldcr)
		crfree(oldcr);
	mutex_exit(&np->r_statelock);

have_fid:
	/*
	 * Keep track of the vnode type at first open.
	 * (see comments above)
	 */
	if (np->n_ovtype == VNON)
		np->n_ovtype = vp->v_type;

out:
	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);
	return (error);
}
3614bff34e3Sthurlow 
/*
 * smbfs_close: VOP_CLOSE for smbfs.
 *
 * Releases local locks/shares (when local locking is in use),
 * flushes dirty pages on last close of a writable open, and
 * drops the FID reference via smbfs_rele_fid().  Note that most
 * of this work happens only on the last close (count == 1), and
 * that we deliberately do NOT bail out for VFS_UNMOUNTED since
 * cleanup must still run.  Always returns 0 except for the
 * wrong-zone case (EIO).
 */
/*ARGSUSED*/
static int
smbfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
	caller_context_t *ct)
{
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	struct smb_cred scred;
	int error = 0;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/*
	 * Don't "bail out" for VFS_UNMOUNTED here,
	 * as we want to do cleanup, etc.
	 */

	/*
	 * zone_enter(2) prevents processes from changing zones with SMBFS files
	 * open; if we happen to get here from the wrong zone we can't do
	 * anything over the wire.
	 */
	if (smi->smi_zone_ref.zref_zone != curproc->p_zone) {
		/*
		 * We could attempt to clean up locks, except we're sure
		 * that the current process didn't acquire any locks on
		 * the file: any attempt to lock a file belong to another zone
		 * will fail, and one can't lock an SMBFS file and then change
		 * zones, as that fails too.
		 *
		 * Returning an error here is the sane thing to do.  A
		 * subsequent call to VN_RELE() which translates to a
		 * smbfs_inactive() will clean up state: if the zone of the
		 * vnode's origin is still alive and kicking, an async worker
		 * thread will handle the request (from the correct zone), and
		 * everything (minus the final smbfs_getattr_otw() call) should
		 * be OK. If the zone is going away smbfs_async_inactive() will
		 * throw away cached pages inline.
		 */
		return (EIO);
	}

	/*
	 * If we are using local locking for this filesystem, then
	 * release all of the SYSV style record locks.  Otherwise,
	 * we are doing network locking and we need to release all
	 * of the network locks.  All of the locks held by this
	 * process on this file are released no matter what the
	 * incoming reference count is.
	 */
	if (smi->smi_flags & SMI_LLOCK) {
		pid_t pid = ddi_get_pid();
		cleanlocks(vp, pid, 0);
		cleanshares(vp, pid);
	}
	/*
	 * else doing OtW locking.  SMB servers drop all locks
	 * on the file ID we close here, so no _lockrelease()
	 */

	/*
	 * This (passed in) count is the ref. count from the
	 * user's file_t before the closef call (fio.c).
	 * The rest happens only on last close.
	 */
	if (count > 1)
		return (0);

	/* NFS has DNLC purge here. */

	/*
	 * If the file was open for write and there are pages,
	 * then make sure dirty pages written back.
	 *
	 * NFS does this async when "close-to-open" is off
	 * (MI_NOCTO flag is set) to avoid blocking the caller.
	 * For now, always do this synchronously (no B_ASYNC).
	 */
	if ((flag & FWRITE) && vn_has_cached_data(vp)) {
		error = smbfs_putpage(vp, (offset_t)0, 0, 0, cr, ct);
		if (error == EAGAIN)
			error = 0;
	}
	/* On a clean flush, forget any prior stale-node error state. */
	if (error == 0) {
		mutex_enter(&np->r_statelock);
		np->r_flags &= ~RSTALE;
		np->r_error = 0;
		mutex_exit(&np->r_statelock);
	}

	/*
	 * Decrement the reference count for the FID
	 * and possibly do the OtW close.
	 *
	 * Exclusive lock for modifying n_fid stuff.
	 * Don't want this one ever interruptible.
	 */
	(void) smbfs_rw_enter_sig(&np->r_lkserlock, RW_WRITER, 0);
	smb_credinit(&scred, cr);

	smbfs_rele_fid(np, &scred);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	return (0);
}
47042d15982SGordon Ross 
/*
 * Helper for smbfs_close.  Decrement the reference count
 * for an SMB-level file or directory ID, and when the last
 * reference for the fid goes away, do the OtW close.
 * Also called in smbfs_inactive (defensive cleanup).
 *
 * Caller must hold r_lkserlock as writer (asserted below)
 * so that n_dirseq / n_fid manipulation is serialized.
 * On last release this also resets n_ovtype, drops the
 * cached attributes if they changed, and frees r_cred.
 */
static void
smbfs_rele_fid(smbnode_t *np, struct smb_cred *scred)
{
	cred_t		*oldcr;
	struct smbfs_fctx *fctx;
	int		error;
	smb_fh_t	*ofid;

	error = 0;

	/* Make sure we serialize for n_dirseq use. */
	ASSERT(smbfs_rw_lock_held(&np->r_lkserlock, RW_WRITER));

	/*
	 * Note that vp->v_type may change if a remote node
	 * is deleted and recreated as a different type, and
	 * our getattr may change v_type accordingly.
	 * Now use n_ovtype to keep track of the v_type
	 * we had during open (see comments above).
	 */
	switch (np->n_ovtype) {
	case VDIR:
		ASSERT(np->n_dirrefs > 0);
		if (--np->n_dirrefs)
			return;	/* not the last reference */
		if ((fctx = np->n_dirseq) != NULL) {
			np->n_dirseq = NULL;
			np->n_dirofs = 0;
			error = smbfs_smb_findclose(fctx, scred);
		}
		break;

	case VREG:
		ASSERT(np->n_fidrefs > 0);
		if (--np->n_fidrefs)
			return;	/* not the last reference */
		if ((ofid = np->n_fid) != NULL) {
			np->n_fid = NULL;
			/* OtW close happens when the last hold drops. */
			smb_fh_rele(ofid);
		}
		break;

	default:
		SMBVDEBUG("bad n_ovtype %d\n", np->n_ovtype);
		break;
	}
	if (error) {
		/* Close errors are not propagated; just note them. */
		SMBVDEBUG("error %d closing %s\n",
		    error, np->n_rpath);
	}

	/* Allow next open to use any v_type. */
	np->n_ovtype = VNON;

	/*
	 * Other "last close" stuff.
	 */
	mutex_enter(&np->r_statelock);
	if (np->n_flag & NATTRCHANGED)
		smbfs_attrcache_rm_locked(np);
	oldcr = np->r_cred;
	np->r_cred = NULL;
	mutex_exit(&np->r_statelock);
	if (oldcr != NULL)
		crfree(oldcr);
}
5434bff34e3Sthurlow 
/*
 * smbfs_read: VOP_READ for smbfs.
 *
 * Fetches attributes to learn the current EOF, clips the request
 * to the file size, then either reads directly over the wire
 * (cache disabled or direct I/O with no mapped/cached pages) or
 * goes through segmap/vpm so data lands in the page cache.
 * uio_resid is temporarily reduced by the amount beyond EOF and
 * restored before return.  Returns 0 or an errno.
 */
/* ARGSUSED */
static int
smbfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
	caller_context_t *ct)
{
	struct smb_cred scred;
	struct vattr	va;
	smbnode_t	*np;
	smbmntinfo_t	*smi;
	offset_t	endoff;
	ssize_t		past_eof;
	int		error;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
		return (EIO);

	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
		return (EIO);

	/* Sanity check: should have a valid open */
	if (np->n_fid == NULL)
		return (EIO);

	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_READER));

	if (vp->v_type != VREG)
		return (EISDIR);

	if (uiop->uio_resid == 0)
		return (0);

	/*
	 * Like NFS3, just check for 63-bit overflow.
	 * Our SMB layer takes care to return EFBIG
	 * when it has to fallback to a 32-bit call.
	 */
	endoff = uiop->uio_loffset + uiop->uio_resid;
	if (uiop->uio_loffset < 0 || endoff < 0)
		return (EINVAL);

	/* get vnode attributes from server */
	va.va_mask = AT_SIZE | AT_MTIME;
	if (error = smbfsgetattr(vp, &va, cr))
		return (error);

	/* Update mtime with mtime from server here? */

	/* if offset is beyond EOF, read nothing */
	if (uiop->uio_loffset >= va.va_size)
		return (0);

	/*
	 * Limit the read to the remaining file size.
	 * Do this by temporarily reducing uio_resid
	 * by the amount that lies beyond the EOF.
	 */
	if (endoff > va.va_size) {
		past_eof = (ssize_t)(endoff - va.va_size);
		uiop->uio_resid -= past_eof;
	} else
		past_eof = 0;

	/*
	 * Bypass VM if caching has been disabled (e.g., locking) or if
	 * using client-side direct I/O and the file is not mmap'd and
	 * there are no cached pages.
	 */
	if ((vp->v_flag & VNOCACHE) ||
	    (((np->r_flags & RDIRECTIO) || (smi->smi_flags & SMI_DIRECTIO)) &&
	    np->r_mapcnt == 0 && np->r_inmap == 0 &&
	    !vn_has_cached_data(vp))) {

		/* Shared lock for n_fid use in smb_rwuio */
		if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
			return (EINTR);
		smb_credinit(&scred, cr);

		error = smb_rwuio(np->n_fid, UIO_READ,
		    uiop, &scred, smb_timo_read);

		smb_credrele(&scred);
		smbfs_rw_exit(&np->r_lkserlock);

		/* undo adjustment of resid */
		uiop->uio_resid += past_eof;

		return (error);
	}

#ifdef	_KERNEL
	/* (else) Do I/O through segmap. */
	do {
		caddr_t		base;
		u_offset_t	off;
		size_t		n;
		int		on;
		uint_t		flags;

		off = uiop->uio_loffset & MAXBMASK; /* mapping offset */
		on = uiop->uio_loffset & MAXBOFFSET; /* Relative offset */
		n = MIN(MAXBSIZE - on, uiop->uio_resid);

		error = smbfs_validate_caches(vp, cr);
		if (error)
			break;

		/* NFS waits for RINCACHEPURGE here. */

		if (vpm_enable) {
			/*
			 * Copy data.
			 */
			error = vpm_data_copy(vp, off + on, n, uiop,
			    1, NULL, 0, S_READ);
		} else {
			base = segmap_getmapflt(segkmap, vp, off + on, n, 1,
			    S_READ);

			error = uiomove(base + on, n, UIO_READ, uiop);
		}

		if (!error) {
			/*
			 * If read a whole block or read to eof,
			 * won't need this buffer again soon.
			 */
			mutex_enter(&np->r_statelock);
			if (n + on == MAXBSIZE ||
			    uiop->uio_loffset == np->r_size)
				flags = SM_DONTNEED;
			else
				flags = 0;
			mutex_exit(&np->r_statelock);
			if (vpm_enable) {
				error = vpm_sync_pages(vp, off, n, flags);
			} else {
				error = segmap_release(segkmap, base, flags);
			}
		} else {
			if (vpm_enable) {
				(void) vpm_sync_pages(vp, off, n, 0);
			} else {
				(void) segmap_release(segkmap, base, 0);
			}
		}
	} while (!error && uiop->uio_resid > 0);
#else	// _KERNEL
	error = ENOSYS;
#endif	// _KERNEL

	/* undo adjustment of resid */
	uiop->uio_resid += past_eof;

	return (error);
}
7024bff34e3Sthurlow 
7034bff34e3Sthurlow 
7044bff34e3Sthurlow /* ARGSUSED */
7054bff34e3Sthurlow static int
smbfs_write(vnode_t * vp,struct uio * uiop,int ioflag,cred_t * cr,caller_context_t * ct)7064bff34e3Sthurlow smbfs_write(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
7074bff34e3Sthurlow 	caller_context_t *ct)
7084bff34e3Sthurlow {
7099c9af259SGordon Ross 	struct smb_cred scred;
7105f4fc069Sjilinxpd 	struct vattr    va;
711613a2f6bSGordon Ross 	smbnode_t	*np;
712613a2f6bSGordon Ross 	smbmntinfo_t	*smi;
7139c9af259SGordon Ross 	offset_t	endoff, limit;
7149c9af259SGordon Ross 	ssize_t		past_limit;
7159c9af259SGordon Ross 	int		error, timo;
7165f4fc069Sjilinxpd 	u_offset_t	last_off;
7175f4fc069Sjilinxpd 	size_t		last_resid;
7188329232eSGordon Ross #ifdef	_KERNEL
7195f4fc069Sjilinxpd 	uint_t		bsize;
7208329232eSGordon Ross #endif
7214bff34e3Sthurlow 
7224bff34e3Sthurlow 	np = VTOSMB(vp);
7234bff34e3Sthurlow 	smi = VTOSMI(vp);
7244bff34e3Sthurlow 
725a19609f8Sjv 	if (curproc->p_zone != smi->smi_zone_ref.zref_zone)
7264bff34e3Sthurlow 		return (EIO);
7274bff34e3Sthurlow 
7284bff34e3Sthurlow 	if (smi->smi_flags & SMI_DEAD || vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
7294bff34e3Sthurlow 		return (EIO);
7304bff34e3Sthurlow 
731*168091e5SGordon Ross 	/* Sanity check: should have a valid open */
732*168091e5SGordon Ross 	if (np->n_fid == NULL)
733*168091e5SGordon Ross 		return (EIO);
734*168091e5SGordon Ross 
7354bff34e3Sthurlow 	ASSERT(smbfs_rw_lock_held(&np->r_rwlock, RW_WRITER));
7364bff34e3Sthurlow 
7374bff34e3Sthurlow 	if (vp->v_type != VREG)
7384bff34e3Sthurlow 		return (EISDIR);
7394bff34e3Sthurlow 
7404bff34e3Sthurlow 	if (uiop->uio_resid == 0)
7414bff34e3Sthurlow 		return (0);
7424bff34e3Sthurlow 
7439c9af259SGordon Ross 	/*
7449c9af259SGordon Ross 	 * Handle ioflag bits: (FAPPEND|FSYNC|FDSYNC)
7459c9af259SGordon Ross 	 */
7469c9af259SGordon Ross 	if (ioflag & (FAPPEND | FSYNC)) {
7479c9af259SGordon Ross 		if (np->n_flag & NMODIFIED) {
74802d09e03SGordon Ross 			smbfs_attrcache_remove(np);
7499c9af259SGordon Ross 		}
7509c9af259SGordon Ross 	}
7519c9af259SGordon Ross 	if (ioflag & FAPPEND) {
7529c9af259SGordon Ross 		/*
7539c9af259SGordon Ross 		 * File size can be changed by another client
7545f4fc069Sjilinxpd 		 *
7555f4fc069Sjilinxpd 		 * Todo: Consider redesigning this to use a
7565f4fc069Sjilinxpd 		 * handle opened for append instead.
7579c9af259SGordon Ross 		 */
7589c9af259SGordon Ross 		va.va_mask = AT_SIZE;
7599c9af259SGordon Ross 		if (error = smbfsgetattr(vp, &va, cr))
7609c9af259SGordon Ross 			return (error);
7619c9af259SGordon Ross 		uiop->uio_loffset = va.va_size;
7629c9af259SGordon Ross 	}
7634bff34e3Sthurlow 
7649c9af259SGordon Ross 	/*
7659c9af259SGordon Ross 	 * Like NFS3, just check for 63-bit overflow.
7669c9af259SGordon Ross 	 */
7679c9af259SGordon Ross 	endoff = uiop->uio_loffset + uiop->uio_resid;
7689c9af259SGordon Ross 	if (uiop->uio_loffset < 0 || endoff < 0)
7699c9af259SGordon Ross 		return (EINVAL);
7704bff34e3Sthurlow 
7714bff34e3Sthurlow 	/*
7729c9af259SGordon Ross 	 * Check to make sure that the process will not exceed
7739c9af259SGordon Ross 	 * its limit on file size.  It is okay to write up to
7749c9af259SGordon Ross 	 * the limit, but not beyond.  Thus, the write which
7759c9af259SGordon Ross 	 * reaches the limit will be short and the next write
7769c9af259SGordon Ross 	 * will return an error.
7779c9af259SGordon Ross 	 *
7789c9af259SGordon Ross 	 * So if we're starting at or beyond the limit, EFBIG.
7799c9af259SGordon Ross 	 * Otherwise, temporarily reduce resid to the amount
7805f4fc069Sjilinxpd 	 * that is after the limit.
7814bff34e3Sthurlow 	 */
7829c9af259SGordon Ross 	limit = uiop->uio_llimit;
7839c9af259SGordon Ross 	if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
7849c9af259SGordon Ross 		limit = MAXOFFSET_T;
7855f4fc069Sjilinxpd 	if (uiop->uio_loffset >= limit) {
7868329232eSGordon Ross #ifdef	_KERNEL
7875f4fc069Sjilinxpd 		proc_t *