14bff34e3Sthurlow /*
24bff34e3Sthurlow  * CDDL HEADER START
34bff34e3Sthurlow  *
44bff34e3Sthurlow  * The contents of this file are subject to the terms of the
54bff34e3Sthurlow  * Common Development and Distribution License (the "License").
64bff34e3Sthurlow  * You may not use this file except in compliance with the License.
74bff34e3Sthurlow  *
84bff34e3Sthurlow  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
94bff34e3Sthurlow  * or http://www.opensolaris.org/os/licensing.
104bff34e3Sthurlow  * See the License for the specific language governing permissions
114bff34e3Sthurlow  * and limitations under the License.
124bff34e3Sthurlow  *
134bff34e3Sthurlow  * When distributing Covered Code, include this CDDL HEADER in each
144bff34e3Sthurlow  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
154bff34e3Sthurlow  * If applicable, add the following below this CDDL HEADER, with the
164bff34e3Sthurlow  * fields enclosed by brackets "[]" replaced with your own identifying
174bff34e3Sthurlow  * information: Portions Copyright [yyyy] [name of copyright owner]
184bff34e3Sthurlow  *
194bff34e3Sthurlow  * CDDL HEADER END
204bff34e3Sthurlow  */
214bff34e3Sthurlow /*
22bd7c6f51SGordon Ross  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
234bff34e3Sthurlow  * Use is subject to license terms.
244bff34e3Sthurlow  *
254bff34e3Sthurlow  *	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
264bff34e3Sthurlow  *	All rights reserved.
274bff34e3Sthurlow  */
28ade42b55SSebastien Roy /*
29ade42b55SSebastien Roy  * Copyright (c) 2017 by Delphix. All rights reserved.
30*adee6784SGordon Ross  * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
31ade42b55SSebastien Roy  */
324bff34e3Sthurlow 
334bff34e3Sthurlow /*
3402d09e03SGordon Ross  * Node hash implementation initially borrowed from NFS (nfs_subr.c)
3502d09e03SGordon Ross  * but then heavily modified. It's no longer an array of hash lists,
3602d09e03SGordon Ross  * but an AVL tree per mount point.  More on this below.
374bff34e3Sthurlow  */
384bff34e3Sthurlow 
394bff34e3Sthurlow #include <sys/param.h>
404bff34e3Sthurlow #include <sys/systm.h>
414bff34e3Sthurlow #include <sys/time.h>
424bff34e3Sthurlow #include <sys/vnode.h>
438329232eSGordon Ross #include <sys/atomic.h>
444bff34e3Sthurlow #include <sys/bitmap.h>
458329232eSGordon Ross #include <sys/buf.h>
464bff34e3Sthurlow #include <sys/dnlc.h>
474bff34e3Sthurlow #include <sys/kmem.h>
484bff34e3Sthurlow #include <sys/sunddi.h>
4902d09e03SGordon Ross #include <sys/sysmacros.h>
505f4fc069Sjilinxpd #include <sys/fcntl.h>
514bff34e3Sthurlow 
524bff34e3Sthurlow #include <netsmb/smb_osdep.h>
534bff34e3Sthurlow 
544bff34e3Sthurlow #include <netsmb/smb.h>
554bff34e3Sthurlow #include <netsmb/smb_conn.h>
564bff34e3Sthurlow #include <netsmb/smb_subr.h>
574bff34e3Sthurlow #include <netsmb/smb_rq.h>
584bff34e3Sthurlow 
594bff34e3Sthurlow #include <smbfs/smbfs.h>
604bff34e3Sthurlow #include <smbfs/smbfs_node.h>
614bff34e3Sthurlow #include <smbfs/smbfs_subr.h>
624bff34e3Sthurlow 
634bff34e3Sthurlow /*
6402d09e03SGordon Ross  * The AVL trees (now per-mount) allow finding an smbfs node by its
6502d09e03SGordon Ross  * full remote path name.  It also allows easy traversal of all nodes
6602d09e03SGordon Ross  * below (path wise) any given node.  A reader/writer lock for each
6702d09e03SGordon Ross  * (per mount) AVL tree is used to control access and to synchronize
6802d09e03SGordon Ross  * lookups, additions, and deletions from that AVL tree.
6902d09e03SGordon Ross  *
7002d09e03SGordon Ross  * Previously, this code used a global array of hash chains, each with
7102d09e03SGordon Ross  * its own rwlock.  A few struct members, functions, and comments may
7202d09e03SGordon Ross  * still refer to a "hash", and those should all now be considered to
7302d09e03SGordon Ross  * refer to the per-mount AVL tree that replaced the old hash chains.
7402d09e03SGordon Ross  * (i.e. member smi_hash_lk, function sn_hashfind, etc.)
754bff34e3Sthurlow  *
764bff34e3Sthurlow  * The smbnode freelist is organized as a doubly linked list with
774bff34e3Sthurlow  * a head pointer.  Additions and deletions are synchronized via
784bff34e3Sthurlow  * a single mutex.
794bff34e3Sthurlow  *
8002d09e03SGordon Ross  * In order to add an smbnode to the free list, it must be linked into
8102d09e03SGordon Ross  * the mount's AVL tree and the exclusive lock for the AVL must be held.
8202d09e03SGordon Ross  * If an smbnode is not linked into the AVL tree, then it is destroyed
834bff34e3Sthurlow  * because it represents no valuable information that can be reused
8402d09e03SGordon Ross  * about the file.  The exclusive lock for the AVL tree must be held
8502d09e03SGordon Ross  * in order to prevent a lookup in the AVL tree from finding the
8602d09e03SGordon Ross  * smbnode and using it and assuming that the smbnode is not on the
8702d09e03SGordon Ross  * freelist.  The lookup in the AVL tree will have the AVL tree lock
8802d09e03SGordon Ross  * held, either exclusive or shared.
894bff34e3Sthurlow  *
904bff34e3Sthurlow  * The vnode reference count for each smbnode is not allowed to drop
914bff34e3Sthurlow  * below 1.  This prevents external entities, such as the VM
924bff34e3Sthurlow  * subsystem, from acquiring references to vnodes already on the
934bff34e3Sthurlow  * freelist and then trying to place them back on the freelist
944bff34e3Sthurlow  * when their reference is released.  This means that when an
9502d09e03SGordon Ross  * smbnode is looked up in the AVL tree, either the smbnode
964bff34e3Sthurlow  * is removed from the freelist and that reference is transferred to
974bff34e3Sthurlow  * the new reference or the vnode reference count must be incremented
984bff34e3Sthurlow  * accordingly.  The mutex for the freelist must be held in order to
994bff34e3Sthurlow  * accurately test to see if the smbnode is on the freelist or not.
10002d09e03SGordon Ross  * The AVL tree lock might be held shared and it is possible that
1014bff34e3Sthurlow  * two different threads may race to remove the smbnode from the
1024bff34e3Sthurlow  * freelist.  This race can be resolved by holding the mutex for the
1034bff34e3Sthurlow  * freelist.  Please note that the mutex for the freelist does not
1044bff34e3Sthurlow  * need to be held if the smbnode is not on the freelist.  It cannot be
1054bff34e3Sthurlow  * placed on the freelist due to the requirement that the thread
1064bff34e3Sthurlow  * putting the smbnode on the freelist must hold the exclusive lock
10702d09e03SGordon Ross  * for the AVL tree and the thread doing the lookup in the AVL tree
10802d09e03SGordon Ross  * is holding either a shared or exclusive lock for the AVL tree.
1094bff34e3Sthurlow  *
1104bff34e3Sthurlow  * The lock ordering is:
1114bff34e3Sthurlow  *
11202d09e03SGordon Ross  *	AVL tree lock -> vnode lock
11302d09e03SGordon Ross  *	AVL tree lock -> freelist lock
1144bff34e3Sthurlow  */
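
/*
 * Illustrative sketch only (not part of the driver): a thread that
 * needs both locks must honor the ordering above and take the AVL
 * tree lock before the freelist lock, e.g.
 *
 *	rw_enter(&mi->smi_hash_lk, RW_WRITER);
 *	mutex_enter(&smbfreelist_lock);
 *	...	(move the node on or off the freelist)
 *	mutex_exit(&smbfreelist_lock);
 *	rw_exit(&mi->smi_hash_lk);
 *
 * Taking them in the opposite order could deadlock against
 * smbfs_addfree() and sn_hashfind(), which follow this order.
 */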
1154bff34e3Sthurlow 
1164bff34e3Sthurlow static kmutex_t smbfreelist_lock;
1174bff34e3Sthurlow static smbnode_t *smbfreelist = NULL;
1184bff34e3Sthurlow static ulong_t	smbnodenew = 0;
1194bff34e3Sthurlow long	nsmbnode = 0;
1204bff34e3Sthurlow 
1214bff34e3Sthurlow static struct kmem_cache *smbnode_cache;
1224bff34e3Sthurlow 
123bd7c6f51SGordon Ross static const vsecattr_t smbfs_vsa0 = { 0 };
124bd7c6f51SGordon Ross 
1254bff34e3Sthurlow /*
1264bff34e3Sthurlow  * Mutex to protect the following variables:
1274bff34e3Sthurlow  *	smbfs_major
1284bff34e3Sthurlow  *	smbfs_minor
1294bff34e3Sthurlow  */
1304bff34e3Sthurlow kmutex_t smbfs_minor_lock;
1314bff34e3Sthurlow int smbfs_major;
1324bff34e3Sthurlow int smbfs_minor;
1334bff34e3Sthurlow 
13402d09e03SGordon Ross /* See smbfs_node_findcreate() */
13502d09e03SGordon Ross struct smbfattr smbfs_fattr0;
13602d09e03SGordon Ross 
1374bff34e3Sthurlow /*
1384bff34e3Sthurlow  * Local functions.
13902d09e03SGordon Ross  * SN for Smb Node
1404bff34e3Sthurlow  */
14102d09e03SGordon Ross static void sn_rmfree(smbnode_t *);
14202d09e03SGordon Ross static void sn_inactive(smbnode_t *);
14302d09e03SGordon Ross static void sn_addhash_locked(smbnode_t *, avl_index_t);
14402d09e03SGordon Ross static void sn_rmhash_locked(smbnode_t *);
14502d09e03SGordon Ross static void sn_destroy_node(smbnode_t *);
1464bff34e3Sthurlow void smbfs_kmem_reclaim(void *cdrarg);
1474bff34e3Sthurlow 
14802d09e03SGordon Ross static smbnode_t *
14902d09e03SGordon Ross sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);
1504bff34e3Sthurlow 
15102d09e03SGordon Ross static smbnode_t *
15202d09e03SGordon Ross make_smbnode(smbmntinfo_t *, const char *, int, int *);
1534bff34e3Sthurlow 
1544bff34e3Sthurlow /*
1554bff34e3Sthurlow  * Free the resources associated with an smbnode.
1564bff34e3Sthurlow  * Note: This is different from smbfs_inactive
1574bff34e3Sthurlow  *
1585f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rinactive
1594bff34e3Sthurlow  */
16002d09e03SGordon Ross static void
16102d09e03SGordon Ross sn_inactive(smbnode_t *np)
1624bff34e3Sthurlow {
163bd7c6f51SGordon Ross 	vsecattr_t	ovsa;
16402d09e03SGordon Ross 	cred_t		*oldcr;
1655f4fc069Sjilinxpd 	char		*orpath;
16602d09e03SGordon Ross 	int		orplen;
1675f4fc069Sjilinxpd 	vnode_t		*vp;
1684bff34e3Sthurlow 
16902d09e03SGordon Ross 	/*
1705f4fc069Sjilinxpd 	 * Here NFS has:
1715f4fc069Sjilinxpd 	 * Flush and invalidate all pages (done by caller)
17202d09e03SGordon Ross 	 * Free any held credentials and caches...
17302d09e03SGordon Ross 	 * etc.  (See NFS code)
17402d09e03SGordon Ross 	 */
17502d09e03SGordon Ross 	mutex_enter(&np->r_statelock);
17602d09e03SGordon Ross 
177bd7c6f51SGordon Ross 	ovsa = np->r_secattr;
178bd7c6f51SGordon Ross 	np->r_secattr = smbfs_vsa0;
179bd7c6f51SGordon Ross 	np->r_sectime = 0;
180bd7c6f51SGordon Ross 
18102d09e03SGordon Ross 	oldcr = np->r_cred;
18202d09e03SGordon Ross 	np->r_cred = NULL;
18302d09e03SGordon Ross 
18402d09e03SGordon Ross 	orpath = np->n_rpath;
18502d09e03SGordon Ross 	orplen = np->n_rplen;
18602d09e03SGordon Ross 	np->n_rpath = NULL;
18702d09e03SGordon Ross 	np->n_rplen = 0;
18802d09e03SGordon Ross 
18902d09e03SGordon Ross 	mutex_exit(&np->r_statelock);
19002d09e03SGordon Ross 
1915f4fc069Sjilinxpd 	vp = SMBTOV(np);
1925f4fc069Sjilinxpd 	if (vn_has_cached_data(vp)) {
1935f4fc069Sjilinxpd 		ASSERT3P(vp, ==, NULL);
1945f4fc069Sjilinxpd 	}
1955f4fc069Sjilinxpd 
196bd7c6f51SGordon Ross 	if (ovsa.vsa_aclentp != NULL)
197bd7c6f51SGordon Ross 		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);
198bd7c6f51SGordon Ross 
19902d09e03SGordon Ross 	if (oldcr != NULL)
20002d09e03SGordon Ross 		crfree(oldcr);
20102d09e03SGordon Ross 
20202d09e03SGordon Ross 	if (orpath != NULL)
20302d09e03SGordon Ross 		kmem_free(orpath, orplen + 1);
2044bff34e3Sthurlow }
2054bff34e3Sthurlow 
2064bff34e3Sthurlow /*
20702d09e03SGordon Ross  * Find and optionally create an smbnode for the passed
20802d09e03SGordon Ross  * mountinfo, directory, separator, and name.  If the
20902d09e03SGordon Ross  * desired smbnode already exists, return a reference.
21002d09e03SGordon Ross  * If the file attributes pointer is non-null, the node
21102d09e03SGordon Ross  * is created if necessary and linked into the AVL tree.
21202d09e03SGordon Ross  *
21302d09e03SGordon Ross  * Callers that need a node created but don't have the
21402d09e03SGordon Ross  * real attributes pass smbfs_fattr0 to force creation.
2154bff34e3Sthurlow  *
21602d09e03SGordon Ross  * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
2174bff34e3Sthurlow  *
2185f4fc069Sjilinxpd  * Based on NFS: nfs_subr.c:makenfsnode
2194bff34e3Sthurlow  */
22002d09e03SGordon Ross smbnode_t *
22102d09e03SGordon Ross smbfs_node_findcreate(
22202d09e03SGordon Ross 	smbmntinfo_t *mi,
22302d09e03SGordon Ross 	const char *dirnm,
2244bff34e3Sthurlow 	int dirlen,
2254bff34e3Sthurlow 	const char *name,
2264bff34e3Sthurlow 	int nmlen,
22791d632c8Sgwr 	char sep,
2284bff34e3Sthurlow 	struct smbfattr *fap)
2294bff34e3Sthurlow {
23002d09e03SGordon Ross 	char tmpbuf[256];
23102d09e03SGordon Ross 	size_t rpalloc;
23202d09e03SGordon Ross 	char *p, *rpath;
23302d09e03SGordon Ross 	int rplen;
2344bff34e3Sthurlow 	smbnode_t *np;
2354bff34e3Sthurlow 	vnode_t *vp;
2364bff34e3Sthurlow 	int newnode;
2374bff34e3Sthurlow 
2384bff34e3Sthurlow 	/*
23902d09e03SGordon Ross 	 * Build the search string, either in tmpbuf or
24002d09e03SGordon Ross 	 * in allocated memory if larger than tmpbuf.
2414bff34e3Sthurlow 	 */
2424bff34e3Sthurlow 	rplen = dirlen;
24302d09e03SGordon Ross 	if (sep != '\0')
24491d632c8Sgwr 		rplen++;
24502d09e03SGordon Ross 	rplen += nmlen;
24602d09e03SGordon Ross 	if (rplen < sizeof (tmpbuf)) {
24702d09e03SGordon Ross 		/* use tmpbuf */
24802d09e03SGordon Ross 		rpalloc = 0;
24902d09e03SGordon Ross 		rpath = tmpbuf;
25002d09e03SGordon Ross 	} else {
25102d09e03SGordon Ross 		rpalloc = rplen + 1;
25202d09e03SGordon Ross 		rpath = kmem_alloc(rpalloc, KM_SLEEP);
25302d09e03SGordon Ross 	}
25402d09e03SGordon Ross 	p = rpath;
25502d09e03SGordon Ross 	bcopy(dirnm, p, dirlen);
25602d09e03SGordon Ross 	p += dirlen;
25702d09e03SGordon Ross 	if (sep != '\0')
25802d09e03SGordon Ross 		*p++ = sep;
25902d09e03SGordon Ross 	if (name != NULL) {
26002d09e03SGordon Ross 		bcopy(name, p, nmlen);
26102d09e03SGordon Ross 		p += nmlen;
26202d09e03SGordon Ross 	}
26302d09e03SGordon Ross 	ASSERT(p == rpath + rplen);
2644bff34e3Sthurlow 
2654bff34e3Sthurlow 	/*
26602d09e03SGordon Ross 	 * Find or create a node with this path.
2674bff34e3Sthurlow 	 */
26802d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_READER);
26902d09e03SGordon Ross 	if (fap == NULL)
27002d09e03SGordon Ross 		np = sn_hashfind(mi, rpath, rplen, NULL);
27102d09e03SGordon Ross 	else
27202d09e03SGordon Ross 		np = make_smbnode(mi, rpath, rplen, &newnode);
27302d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
27402d09e03SGordon Ross 
27502d09e03SGordon Ross 	if (rpalloc)
27602d09e03SGordon Ross 		kmem_free(rpath, rpalloc);
2774bff34e3Sthurlow 
2784bff34e3Sthurlow 	if (fap == NULL) {
27902d09e03SGordon Ross 		/*
28002d09e03SGordon Ross 		 * Caller is "just looking" (no create)
28102d09e03SGordon Ross 		 * so np may or may not be NULL here.
28202d09e03SGordon Ross 		 * Either way, we're done.
28302d09e03SGordon Ross 		 */
28402d09e03SGordon Ross 		return (np);
2854bff34e3Sthurlow 	}
2864bff34e3Sthurlow 
28702d09e03SGordon Ross 	/*
28802d09e03SGordon Ross 	 * We should have a node, possibly created.
28902d09e03SGordon Ross 	 * Do we have (real) attributes to apply?
29002d09e03SGordon Ross 	 */
29102d09e03SGordon Ross 	ASSERT(np != NULL);
29202d09e03SGordon Ross 	if (fap == &smbfs_fattr0)
29302d09e03SGordon Ross 		return (np);
2944bff34e3Sthurlow 
29502d09e03SGordon Ross 	/*
29602d09e03SGordon Ross 	 * Apply the given attributes to this node,
29702d09e03SGordon Ross 	 * dealing with any cache impact, etc.
29802d09e03SGordon Ross 	 */
29902d09e03SGordon Ross 	vp = SMBTOV(np);
30002d09e03SGordon Ross 	smbfs_attrcache_fa(vp, fap);
3014bff34e3Sthurlow 
30202d09e03SGordon Ross 	/*
30302d09e03SGordon Ross 	 * Note NFS sets vp->v_type here, assuming it
30402d09e03SGordon Ross 	 * can never change for the life of a node.
30502d09e03SGordon Ross 	 * We allow v_type to change, and set it in
30602d09e03SGordon Ross 	 * smbfs_attrcache().  Also: mode, uid, gid
30702d09e03SGordon Ross 	 */
30802d09e03SGordon Ross 	return (np);
3094bff34e3Sthurlow }
3104bff34e3Sthurlow 
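/*
 * Usage sketch (illustrative only; not copied from a real caller).
 * The local names smi, rpath, rplen, dirpath, name, etc. are assumed:
 *
 *	smbnode_t *np;
 *
 *	(lookup only -- returns NULL if the node is not already cached)
 *	np = smbfs_node_findcreate(smi, rpath, rplen, NULL, 0, 0, NULL);
 *
 *	(create if needed, with no real attributes yet)
 *	np = smbfs_node_findcreate(smi, dirpath, dirlen, name, nmlen,
 *	    '\\', &smbfs_fattr0);
 *
 * A non-NULL return is a held node; the caller eventually does
 * VN_RELE(SMBTOV(np)) when finished with it.
 */
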
3114bff34e3Sthurlow /*
3125f4fc069Sjilinxpd  * Here NFS has: nfs_subr.c:rtablehash
3134bff34e3Sthurlow  * We use smbfs_hash().
3144bff34e3Sthurlow  */
3154bff34e3Sthurlow 
3164bff34e3Sthurlow /*
3174bff34e3Sthurlow  * Find or create an smbnode.
3185f4fc069Sjilinxpd  * From NFS: nfs_subr.c:make_rnode
3194bff34e3Sthurlow  */
32002d09e03SGordon Ross static smbnode_t *
3214bff34e3Sthurlow make_smbnode(
32202d09e03SGordon Ross 	smbmntinfo_t *mi,
32302d09e03SGordon Ross 	const char *rpath,
3244bff34e3Sthurlow 	int rplen,
3254bff34e3Sthurlow 	int *newnode)
3264bff34e3Sthurlow {
3274bff34e3Sthurlow 	smbnode_t *np;
3284bff34e3Sthurlow 	smbnode_t *tnp;
3294bff34e3Sthurlow 	vnode_t *vp;
33002d09e03SGordon Ross 	vfs_t *vfsp;
33102d09e03SGordon Ross 	avl_index_t where;
33202d09e03SGordon Ross 	char *new_rpath = NULL;
3334bff34e3Sthurlow 
33402d09e03SGordon Ross 	ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
33502d09e03SGordon Ross 	vfsp = mi->smi_vfsp;
3364bff34e3Sthurlow 
3374bff34e3Sthurlow start:
33802d09e03SGordon Ross 	np = sn_hashfind(mi, rpath, rplen, NULL);
3394bff34e3Sthurlow 	if (np != NULL) {
3404bff34e3Sthurlow 		*newnode = 0;
34102d09e03SGordon Ross 		return (np);
3424bff34e3Sthurlow 	}
3434bff34e3Sthurlow 
3444bff34e3Sthurlow 	/* Note: will retake this lock below. */
34502d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
3464bff34e3Sthurlow 
3474bff34e3Sthurlow 	/*
3484bff34e3Sthurlow 	 * see if we can find something on the freelist
3494bff34e3Sthurlow 	 */
3504bff34e3Sthurlow 	mutex_enter(&smbfreelist_lock);
3514bff34e3Sthurlow 	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
3524bff34e3Sthurlow 		np = smbfreelist;
35302d09e03SGordon Ross 		sn_rmfree(np);
3544bff34e3Sthurlow 		mutex_exit(&smbfreelist_lock);
3554bff34e3Sthurlow 
3564bff34e3Sthurlow 		vp = SMBTOV(np);
3574bff34e3Sthurlow 
3584bff34e3Sthurlow 		if (np->r_flags & RHASHED) {
35902d09e03SGordon Ross 			smbmntinfo_t *tmp_mi = np->n_mount;
36002d09e03SGordon Ross 			ASSERT(tmp_mi != NULL);
36102d09e03SGordon Ross 			rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
3624bff34e3Sthurlow 			mutex_enter(&vp->v_lock);
3634bff34e3Sthurlow 			if (vp->v_count > 1) {
364ade42b55SSebastien Roy 				VN_RELE_LOCKED(vp);
3654bff34e3Sthurlow 				mutex_exit(&vp->v_lock);
36602d09e03SGordon Ross 				rw_exit(&tmp_mi->smi_hash_lk);
36702d09e03SGordon Ross 				/* start over */
36802d09e03SGordon Ross 				rw_enter(&mi->smi_hash_lk, RW_READER);
3694bff34e3Sthurlow 				goto start;
3704bff34e3Sthurlow 			}
3714bff34e3Sthurlow 			mutex_exit(&vp->v_lock);
37202d09e03SGordon Ross 			sn_rmhash_locked(np);
37302d09e03SGordon Ross 			rw_exit(&tmp_mi->smi_hash_lk);
3744bff34e3Sthurlow 		}
3754bff34e3Sthurlow 
37602d09e03SGordon Ross 		sn_inactive(np);
3774bff34e3Sthurlow 
3784bff34e3Sthurlow 		mutex_enter(&vp->v_lock);
3794bff34e3Sthurlow 		if (vp->v_count > 1) {
380ade42b55SSebastien Roy 			VN_RELE_LOCKED(vp);
3814bff34e3Sthurlow 			mutex_exit(&vp->v_lock);
38202d09e03SGordon Ross 			rw_enter(&mi->smi_hash_lk, RW_READER);
3834bff34e3Sthurlow 			goto start;
3844bff34e3Sthurlow 		}
3854bff34e3Sthurlow 		mutex_exit(&vp->v_lock);
3864bff34e3Sthurlow 		vn_invalid(vp);
3874bff34e3Sthurlow 		/*
3884bff34e3Sthurlow 		 * destroy old locks before bzero'ing and
3894bff34e3Sthurlow 		 * recreating the locks below.
3904bff34e3Sthurlow 		 */
3914bff34e3Sthurlow 		smbfs_rw_destroy(&np->r_rwlock);
3924bff34e3Sthurlow 		smbfs_rw_destroy(&np->r_lkserlock);
3934bff34e3Sthurlow 		mutex_destroy(&np->r_statelock);
3944bff34e3Sthurlow 		cv_destroy(&np->r_cv);
3954bff34e3Sthurlow 		/*
3964bff34e3Sthurlow 		 * Make sure that if smbnode is recycled then
3974bff34e3Sthurlow 		 * VFS count is decremented properly before
3984bff34e3Sthurlow 		 * reuse.
3994bff34e3Sthurlow 		 */
4004bff34e3Sthurlow 		VFS_RELE(vp->v_vfsp);
4014bff34e3Sthurlow 		vn_reinit(vp);
4024bff34e3Sthurlow 	} else {
4034bff34e3Sthurlow 		/*
4044bff34e3Sthurlow 		 * allocate and initialize a new smbnode
4054bff34e3Sthurlow 		 */
4064bff34e3Sthurlow 		vnode_t *new_vp;
4074bff34e3Sthurlow 
4084bff34e3Sthurlow 		mutex_exit(&smbfreelist_lock);
4094bff34e3Sthurlow 
4104bff34e3Sthurlow 		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
4114bff34e3Sthurlow 		new_vp = vn_alloc(KM_SLEEP);
4124bff34e3Sthurlow 
4131a5e258fSJosef 'Jeff' Sipek 		atomic_inc_ulong((ulong_t *)&smbnodenew);
4144bff34e3Sthurlow 		vp = new_vp;
4154bff34e3Sthurlow 	}
4164bff34e3Sthurlow 
41702d09e03SGordon Ross 	/*
41802d09e03SGordon Ross 	 * Allocate and copy the rpath we'll need below.
41902d09e03SGordon Ross 	 */
42002d09e03SGordon Ross 	new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
42102d09e03SGordon Ross 	bcopy(rpath, new_rpath, rplen);
42202d09e03SGordon Ross 	new_rpath[rplen] = '\0';
42302d09e03SGordon Ross 
4244bff34e3Sthurlow 	/* Initialize smbnode_t */
4254bff34e3Sthurlow 	bzero(np, sizeof (*np));
4264bff34e3Sthurlow 
4274bff34e3Sthurlow 	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
4284bff34e3Sthurlow 	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
4294bff34e3Sthurlow 	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
4304bff34e3Sthurlow 	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
4314bff34e3Sthurlow 	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
4324bff34e3Sthurlow 
4334bff34e3Sthurlow 	np->r_vnode = vp;
4344bff34e3Sthurlow 	np->n_mount = mi;
43502d09e03SGordon Ross 
436*adee6784SGordon Ross 	np->n_fid = NULL;
43702d09e03SGordon Ross 	np->n_uid = mi->smi_uid;
43802d09e03SGordon Ross 	np->n_gid = mi->smi_gid;
43902d09e03SGordon Ross 	/* Leave attributes "stale." */
4404bff34e3Sthurlow 
4414bff34e3Sthurlow 	/*
4425f4fc069Sjilinxpd 	 * Here NFS has avl_create(&np->r_dir, ...)
4435f4fc069Sjilinxpd 	 * for the readdir cache (not used here).
4444bff34e3Sthurlow 	 */
4454bff34e3Sthurlow 
4464bff34e3Sthurlow 	/* Now fill in the vnode. */
4474bff34e3Sthurlow 	vn_setops(vp, smbfs_vnodeops);
4484bff34e3Sthurlow 	vp->v_data = (caddr_t)np;
4494bff34e3Sthurlow 	VFS_HOLD(vfsp);
4504bff34e3Sthurlow 	vp->v_vfsp = vfsp;
4514bff34e3Sthurlow 	vp->v_type = VNON;
4524bff34e3Sthurlow 
4534bff34e3Sthurlow 	/*
45402d09e03SGordon Ross 	 * We entered with mi->smi_hash_lk held (reader).
45502d09e03SGordon Ross 	 * Retake it now, (as the writer).
45602d09e03SGordon Ross 	 * Will return with it held.
4574bff34e3Sthurlow 	 */
45802d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_WRITER);
45902d09e03SGordon Ross 
46002d09e03SGordon Ross 	/*
46102d09e03SGordon Ross 	 * There is a race condition where someone else
46202d09e03SGordon Ross 	 * may alloc the smbnode while no locks are held,
46302d09e03SGordon Ross 	 * so check again and recover if found.
46402d09e03SGordon Ross 	 */
46502d09e03SGordon Ross 	tnp = sn_hashfind(mi, rpath, rplen, &where);
4664bff34e3Sthurlow 	if (tnp != NULL) {
46702d09e03SGordon Ross 		/*
46802d09e03SGordon Ross 		 * Lost the race.  Put the node we were building
46902d09e03SGordon Ross 		 * on the free list and return the one we found.
47002d09e03SGordon Ross 		 */
47102d09e03SGordon Ross 		rw_exit(&mi->smi_hash_lk);
47202d09e03SGordon Ross 		kmem_free(new_rpath, rplen + 1);
47302d09e03SGordon Ross 		smbfs_addfree(np);
47402d09e03SGordon Ross 		rw_enter(&mi->smi_hash_lk, RW_READER);
4754bff34e3Sthurlow 		*newnode = 0;
47602d09e03SGordon Ross 		return (tnp);
4774bff34e3Sthurlow 	}
4784bff34e3Sthurlow 
4794bff34e3Sthurlow 	/*
48002d09e03SGordon Ross 	 * Hash search identifies nodes by the remote path
48102d09e03SGordon Ross 	 * (n_rpath) so fill that in now, before linking
48202d09e03SGordon Ross 	 * this node into the node cache (AVL tree).
4834bff34e3Sthurlow 	 */
48402d09e03SGordon Ross 	np->n_rpath = new_rpath;
4854bff34e3Sthurlow 	np->n_rplen = rplen;
48602d09e03SGordon Ross 	np->n_ino = smbfs_gethash(new_rpath, rplen);
4874bff34e3Sthurlow 
48802d09e03SGordon Ross 	sn_addhash_locked(np, where);
4894bff34e3Sthurlow 	*newnode = 1;
49002d09e03SGordon Ross 	return (np);
4914bff34e3Sthurlow }
4924bff34e3Sthurlow 
4934bff34e3Sthurlow /*
49402d09e03SGordon Ross  * smbfs_addfree
49502d09e03SGordon Ross  * Put an smbnode on the free list, or destroy it immediately
49602d09e03SGordon Ross  * if it offers no value were it to be reclaimed later.  Also
49702d09e03SGordon Ross  * destroy immediately when we have too many smbnodes, etc.
4984bff34e3Sthurlow  *
4994bff34e3Sthurlow  * Normally called by smbfs_inactive, but also
5004bff34e3Sthurlow  * called in here during cleanup operations.
5014bff34e3Sthurlow  *
5025f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rp_addfree
5034bff34e3Sthurlow  */
5044bff34e3Sthurlow void
50502d09e03SGordon Ross smbfs_addfree(smbnode_t *np)
5064bff34e3Sthurlow {
5074bff34e3Sthurlow 	vnode_t *vp;
5084bff34e3Sthurlow 	struct vfs *vfsp;
50902d09e03SGordon Ross 	smbmntinfo_t *mi;
51002d09e03SGordon Ross 
51102d09e03SGordon Ross 	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
5124bff34e3Sthurlow 
5134bff34e3Sthurlow 	vp = SMBTOV(np);
5144bff34e3Sthurlow 	ASSERT(vp->v_count >= 1);
51502d09e03SGordon Ross 
51602d09e03SGordon Ross 	vfsp = vp->v_vfsp;
51702d09e03SGordon Ross 	mi = VFTOSMI(vfsp);
5184bff34e3Sthurlow 
5194bff34e3Sthurlow 	/*
52002d09e03SGordon Ross 	 * If there are no more references to this smbnode and:
52102d09e03SGordon Ross 	 * we have too many smbnodes allocated, or if the node
52202d09e03SGordon Ross 	 * is no longer accessible via the AVL tree (!RHASHED),
52302d09e03SGordon Ross 	 * or an i/o error occurred while writing to the file,
52402d09e03SGordon Ross 	 * or it's part of an unmounted FS, then try to destroy
52502d09e03SGordon Ross 	 * it instead of putting it on the smbnode freelist.
5264bff34e3Sthurlow 	 */
52702d09e03SGordon Ross 	if (np->r_count == 0 && (
52802d09e03SGordon Ross 	    (np->r_flags & RHASHED) == 0 ||
52902d09e03SGordon Ross 	    (np->r_error != 0) ||
53002d09e03SGordon Ross 	    (vfsp->vfs_flag & VFS_UNMOUNTED) ||
53102d09e03SGordon Ross 	    (smbnodenew > nsmbnode))) {
53202d09e03SGordon Ross 
53302d09e03SGordon Ross 		/* Try to destroy this node. */
53402d09e03SGordon Ross 
5354bff34e3Sthurlow 		if (np->r_flags & RHASHED) {
53602d09e03SGordon Ross 			rw_enter(&mi->smi_hash_lk, RW_WRITER);
5374bff34e3Sthurlow 			mutex_enter(&vp->v_lock);
5384bff34e3Sthurlow 			if (vp->v_count > 1) {
539ade42b55SSebastien Roy 				VN_RELE_LOCKED(vp);
5404bff34e3Sthurlow 				mutex_exit(&vp->v_lock);
54102d09e03SGordon Ross 				rw_exit(&mi->smi_hash_lk);
5424bff34e3Sthurlow 				return;
5434bff34e3Sthurlow 				/*
5444bff34e3Sthurlow 				 * Will get another call later,
5454bff34e3Sthurlow 				 * via smbfs_inactive.
5464bff34e3Sthurlow 				 */
5474bff34e3Sthurlow 			}
5484bff34e3Sthurlow 			mutex_exit(&vp->v_lock);
54902d09e03SGordon Ross 			sn_rmhash_locked(np);
55002d09e03SGordon Ross 			rw_exit(&mi->smi_hash_lk);
5514bff34e3Sthurlow 		}
5524bff34e3Sthurlow 
55302d09e03SGordon Ross 		sn_inactive(np);
5544bff34e3Sthurlow 
5554bff34e3Sthurlow 		/*
5564bff34e3Sthurlow 		 * Recheck the vnode reference count.  We need to
5574bff34e3Sthurlow 		 * make sure that another reference has not been
5584bff34e3Sthurlow 		 * acquired while we were not holding v_lock.  The
55902d09e03SGordon Ross 		 * smbnode is not in the smbnode "hash" AVL tree, so
56002d09e03SGordon Ross 		 * the only way for a reference to have been acquired
5614bff34e3Sthurlow 		 * is for a VOP_PUTPAGE because the smbnode was marked
56202d09e03SGordon Ross 		 * with RDIRTY or for a modified page.  This vnode
5634bff34e3Sthurlow 		 * reference may have been acquired before our call
56402d09e03SGordon Ross 		 * to sn_inactive.  The i/o may have been completed,
56502d09e03SGordon Ross 		 * thus allowing sn_inactive to complete, but the
5664bff34e3Sthurlow 		 * reference to the vnode may not have been released
5674bff34e3Sthurlow 		 * yet.  In any case, the smbnode can not be destroyed
5684bff34e3Sthurlow 		 * until the other references to this vnode have been
5694bff34e3Sthurlow 		 * released.  The other references will take care of
5704bff34e3Sthurlow 		 * either destroying the smbnode or placing it on the
5714bff34e3Sthurlow 		 * smbnode freelist.  If there are no other references,
5724bff34e3Sthurlow 		 * then the smbnode may be safely destroyed.
5734bff34e3Sthurlow 		 */
5744bff34e3Sthurlow 		mutex_enter(&vp->v_lock);
5754bff34e3Sthurlow 		if (vp->v_count > 1) {
576ade42b55SSebastien Roy 			VN_RELE_LOCKED(vp);
5774bff34e3Sthurlow 			mutex_exit(&vp->v_lock);
5784bff34e3Sthurlow 			return;
5794bff34e3Sthurlow 		}
5804bff34e3Sthurlow 		mutex_exit(&vp->v_lock);
5814bff34e3Sthurlow 
58202d09e03SGordon Ross 		sn_destroy_node(np);
5834bff34e3Sthurlow 		return;
5844bff34e3Sthurlow 	}
58502d09e03SGordon Ross 
5864bff34e3Sthurlow 	/*
58702d09e03SGordon Ross 	 * Lock the AVL tree and then recheck the reference count
5884bff34e3Sthurlow 	 * to ensure that no other threads have acquired a reference
5894bff34e3Sthurlow 	 * to indicate that the smbnode should not be placed on the
5904bff34e3Sthurlow 	 * freelist.  If another reference has been acquired, then
5914bff34e3Sthurlow 	 * just release this one and let the other thread complete
5924bff34e3Sthurlow 	 * the processing of adding this smbnode to the freelist.
5934bff34e3Sthurlow 	 */
59402d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_WRITER);
5954bff34e3Sthurlow 
5964bff34e3Sthurlow 	mutex_enter(&vp->v_lock);
5974bff34e3Sthurlow 	if (vp->v_count > 1) {
598ade42b55SSebastien Roy 		VN_RELE_LOCKED(vp);
5994bff34e3Sthurlow 		mutex_exit(&vp->v_lock);
60002d09e03SGordon Ross 		rw_exit(&mi->smi_hash_lk);
6014bff34e3Sthurlow 		return;
6024bff34e3Sthurlow 	}
6034bff34e3Sthurlow 	mutex_exit(&vp->v_lock);
6044bff34e3Sthurlow 
6054bff34e3Sthurlow 	/*
60602d09e03SGordon Ross 	 * Put this node on the free list.
6074bff34e3Sthurlow 	 */
6084bff34e3Sthurlow 	mutex_enter(&smbfreelist_lock);
6094bff34e3Sthurlow 	if (smbfreelist == NULL) {
6104bff34e3Sthurlow 		np->r_freef = np;
6114bff34e3Sthurlow 		np->r_freeb = np;
6124bff34e3Sthurlow 		smbfreelist = np;
6134bff34e3Sthurlow 	} else {
6144bff34e3Sthurlow 		np->r_freef = smbfreelist;
6154bff34e3Sthurlow 		np->r_freeb = smbfreelist->r_freeb;
6164bff34e3Sthurlow 		smbfreelist->r_freeb->r_freef = np;
6174bff34e3Sthurlow 		smbfreelist->r_freeb = np;
6184bff34e3Sthurlow 	}
6194bff34e3Sthurlow 	mutex_exit(&smbfreelist_lock);
6204bff34e3Sthurlow 
62102d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
6224bff34e3Sthurlow }
6234bff34e3Sthurlow 
6244bff34e3Sthurlow /*
6254bff34e3Sthurlow  * Remove an smbnode from the free list.
6264bff34e3Sthurlow  *
6274bff34e3Sthurlow  * The caller must be holding smbfreelist_lock and the smbnode
6284bff34e3Sthurlow  * must be on the freelist.
6294bff34e3Sthurlow  *
6305f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rp_rmfree
6314bff34e3Sthurlow  */
63202d09e03SGordon Ross static void
63302d09e03SGordon Ross sn_rmfree(smbnode_t *np)
6344bff34e3Sthurlow {
6354bff34e3Sthurlow 
6364bff34e3Sthurlow 	ASSERT(MUTEX_HELD(&smbfreelist_lock));
6374bff34e3Sthurlow 	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);
6384bff34e3Sthurlow 
6394bff34e3Sthurlow 	if (np == smbfreelist) {
6404bff34e3Sthurlow 		smbfreelist = np->r_freef;
6414bff34e3Sthurlow 		if (np == smbfreelist)
6424bff34e3Sthurlow 			smbfreelist = NULL;
6434bff34e3Sthurlow 	}
6444bff34e3Sthurlow 
6454bff34e3Sthurlow 	np->r_freeb->r_freef = np->r_freef;
6464bff34e3Sthurlow 	np->r_freef->r_freeb = np->r_freeb;
6474bff34e3Sthurlow 
6484bff34e3Sthurlow 	np->r_freef = np->r_freeb = NULL;
6494bff34e3Sthurlow }
6504bff34e3Sthurlow 
6514bff34e3Sthurlow /*
65202d09e03SGordon Ross  * Put an smbnode in the "hash" AVL tree.
6534bff34e3Sthurlow  *
65402d09e03SGordon Ross  * The caller must hold the rwlock as writer.
6554bff34e3Sthurlow  *
6565f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rp_addhash
6574bff34e3Sthurlow  */
65802d09e03SGordon Ross static void
65902d09e03SGordon Ross sn_addhash_locked(smbnode_t *np, avl_index_t where)
6604bff34e3Sthurlow {
66102d09e03SGordon Ross 	smbmntinfo_t *mi = np->n_mount;
6624bff34e3Sthurlow 
66302d09e03SGordon Ross 	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
6644bff34e3Sthurlow 
6654bff34e3Sthurlow 	mutex_enter(&np->r_statelock);
666ff1e230cSjilinxpd 	if ((np->r_flags & RHASHED) == 0) {
667ff1e230cSjilinxpd 		avl_insert(&mi->smi_hash_avl, np, where);
668ff1e230cSjilinxpd 		np->r_flags |= RHASHED;
669ff1e230cSjilinxpd 	}
6704bff34e3Sthurlow 	mutex_exit(&np->r_statelock);
6714bff34e3Sthurlow }
6724bff34e3Sthurlow 
6734bff34e3Sthurlow /*
67402d09e03SGordon Ross  * Remove an smbnode from the "hash" AVL tree.
6754bff34e3Sthurlow  *
67602d09e03SGordon Ross  * The caller must hold the rwlock as writer.
6774bff34e3Sthurlow  *
6785f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rp_rmhash_locked
6794bff34e3Sthurlow  */
68002d09e03SGordon Ross static void
68102d09e03SGordon Ross sn_rmhash_locked(smbnode_t *np)
6824bff34e3Sthurlow {
68302d09e03SGordon Ross 	smbmntinfo_t *mi = np->n_mount;
6844bff34e3Sthurlow 
68502d09e03SGordon Ross 	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
6864bff34e3Sthurlow 
6874bff34e3Sthurlow 	mutex_enter(&np->r_statelock);
688ff1e230cSjilinxpd 	if ((np->r_flags & RHASHED) != 0) {
689ff1e230cSjilinxpd 		np->r_flags &= ~RHASHED;
690ff1e230cSjilinxpd 		avl_remove(&mi->smi_hash_avl, np);
691ff1e230cSjilinxpd 	}
6924bff34e3Sthurlow 	mutex_exit(&np->r_statelock);
6934bff34e3Sthurlow }
6944bff34e3Sthurlow 
6954bff34e3Sthurlow /*
69602d09e03SGordon Ross  * Remove an smbnode from the "hash" AVL tree.
6974bff34e3Sthurlow  *
69802d09e03SGordon Ross  * The caller must not be holding the rwlock.
6994bff34e3Sthurlow  */
7004bff34e3Sthurlow void
70102d09e03SGordon Ross smbfs_rmhash(smbnode_t *np)
7024bff34e3Sthurlow {
70302d09e03SGordon Ross 	smbmntinfo_t *mi = np->n_mount;
7044bff34e3Sthurlow 
70502d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_WRITER);
70602d09e03SGordon Ross 	sn_rmhash_locked(np);
70702d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
7084bff34e3Sthurlow }
7094bff34e3Sthurlow 
7104bff34e3Sthurlow /*
71102d09e03SGordon Ross  * Lookup an smbnode by remote pathname
7124bff34e3Sthurlow  *
71302d09e03SGordon Ross  * The caller must be holding the AVL rwlock, either shared or exclusive.
7144bff34e3Sthurlow  *
7155f4fc069Sjilinxpd  * From NFS: nfs_subr.c:rfind
7164bff34e3Sthurlow  */
71702d09e03SGordon Ross static smbnode_t *
71802d09e03SGordon Ross sn_hashfind(
71902d09e03SGordon Ross 	smbmntinfo_t *mi,
7204bff34e3Sthurlow 	const char *rpath,
7214bff34e3Sthurlow 	int rplen,
72202d09e03SGordon Ross 	avl_index_t *pwhere) /* optional */
7234bff34e3Sthurlow {
72402d09e03SGordon Ross 	smbfs_node_hdr_t nhdr;
7254bff34e3Sthurlow 	smbnode_t *np;
7264bff34e3Sthurlow 	vnode_t *vp;
7274bff34e3Sthurlow 
72802d09e03SGordon Ross 	ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));
7294bff34e3Sthurlow 
73002d09e03SGordon Ross 	bzero(&nhdr, sizeof (nhdr));
73102d09e03SGordon Ross 	nhdr.hdr_n_rpath = (char *)rpath;
73202d09e03SGordon Ross 	nhdr.hdr_n_rplen = rplen;
73302d09e03SGordon Ross 
73402d09e03SGordon Ross 	/* See smbfs_node_cmp below. */
73502d09e03SGordon Ross 	np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);
73602d09e03SGordon Ross 
73702d09e03SGordon Ross 	if (np == NULL)
73802d09e03SGordon Ross 		return (NULL);
73902d09e03SGordon Ross 
74002d09e03SGordon Ross 	/*
74102d09e03SGordon Ross 	 * Found it in the "hash" AVL tree.
74202d09e03SGordon Ross 	 * Remove from free list, if necessary.
74302d09e03SGordon Ross 	 */
74402d09e03SGordon Ross 	vp = SMBTOV(np);
74502d09e03SGordon Ross 	if (np->r_freef != NULL) {
74602d09e03SGordon Ross 		mutex_enter(&smbfreelist_lock);
74702d09e03SGordon Ross 		/*
74802d09e03SGordon Ross 		 * If the smbnode is on the freelist,
74902d09e03SGordon Ross 		 * then remove it and use that reference
75002d09e03SGordon Ross 		 * as the new reference.  Otherwise,
75102d09e03SGordon Ross 		 * need to increment the reference count.
75202d09e03SGordon Ross 		 */
75302d09e03SGordon Ross 		if (np->r_freef != NULL) {
75402d09e03SGordon Ross 			sn_rmfree(np);
75502d09e03SGordon Ross 			mutex_exit(&smbfreelist_lock);
75602d09e03SGordon Ross 		} else {
75702d09e03SGordon Ross 			mutex_exit(&smbfreelist_lock);
75802d09e03SGordon Ross 			VN_HOLD(vp);
7594bff34e3Sthurlow 		}
76002d09e03SGordon Ross 	} else
76102d09e03SGordon Ross 		VN_HOLD(vp);
76202d09e03SGordon Ross 
76302d09e03SGordon Ross 	return (np);
76402d09e03SGordon Ross }
76502d09e03SGordon Ross 
76602d09e03SGordon Ross static int
76702d09e03SGordon Ross smbfs_node_cmp(const void *va, const void *vb)
76802d09e03SGordon Ross {
76902d09e03SGordon Ross 	const smbfs_node_hdr_t *a = va;
77002d09e03SGordon Ross 	const smbfs_node_hdr_t *b = vb;
77102d09e03SGordon Ross 	int clen, diff;
77202d09e03SGordon Ross 
77302d09e03SGordon Ross 	/*
77402d09e03SGordon Ross 	 * Same semantics as strcmp, but does not
77502d09e03SGordon Ross 	 * assume the strings are null terminated.
77602d09e03SGordon Ross 	 */
77702d09e03SGordon Ross 	clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
77802d09e03SGordon Ross 	    a->hdr_n_rplen : b->hdr_n_rplen;
77902d09e03SGordon Ross 	diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
78002d09e03SGordon Ross 	if (diff < 0)
78102d09e03SGordon Ross 		return (-1);
78202d09e03SGordon Ross 	if (diff > 0)
78302d09e03SGordon Ross 		return (1);
78402d09e03SGordon Ross 	/* they match through clen */
78502d09e03SGordon Ross 	if (b->hdr_n_rplen > clen)
78602d09e03SGordon Ross 		return (-1);
78702d09e03SGordon Ross 	if (a->hdr_n_rplen > clen)
78802d09e03SGordon Ross 		return (1);
78902d09e03SGordon Ross 	return (0);
79002d09e03SGordon Ross }
79102d09e03SGordon Ross 
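/*
 * Worked example (illustrative): using the names from the
 * smbfs_attrcache_prune() comment below, this comparison orders
 *	"foo"  <  "foo bar"  <  "foo/bar"
 * because a string that is a prefix of a longer one sorts first,
 * and ' ' sorts before '/' (and before '\\').  That ordering is
 * why the prune walk below must skip entries like "foo bar"
 * instead of stopping at the first node that is not a child.
 */
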
79202d09e03SGordon Ross /*
79302d09e03SGordon Ross  * Setup the "hash" AVL tree used for our node cache.
79402d09e03SGordon Ross  * See: smbfs_mount, smbfs_destroy_table.
79502d09e03SGordon Ross  */
79602d09e03SGordon Ross void
79702d09e03SGordon Ross smbfs_init_hash_avl(avl_tree_t *avl)
79802d09e03SGordon Ross {
79902d09e03SGordon Ross 	avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
80002d09e03SGordon Ross 	    offsetof(smbnode_t, r_avl_node));
80102d09e03SGordon Ross }
80202d09e03SGordon Ross 
80302d09e03SGordon Ross /*
80402d09e03SGordon Ross  * Invalidate the cached attributes for all nodes "under" the
80502d09e03SGordon Ross  * passed-in node.  Note: the passed-in node is NOT affected by
80602d09e03SGordon Ross  * this call.  This is used both for files under some directory
80702d09e03SGordon Ross  * after the directory is deleted or renamed, and for extended
80802d09e03SGordon Ross  * attribute files (named streams) under a plain file after that
80902d09e03SGordon Ross  * file is renamed or deleted.
81002d09e03SGordon Ross  *
81102d09e03SGordon Ross  * Do this by walking the AVL tree starting at the passed in node,
81202d09e03SGordon Ross  * and continuing while the visited nodes have a path prefix matching
81302d09e03SGordon Ross  * the entire path of the passed-in node, and a separator just after
81402d09e03SGordon Ross  * that matching path prefix.  Watch out for cases where the AVL tree
81502d09e03SGordon Ross  * order may not exactly match the order of an FS walk, i.e.
81602d09e03SGordon Ross  * consider this sequence:
81702d09e03SGordon Ross  *	"foo"		(directory)
81802d09e03SGordon Ross  *	"foo bar"	(name containing a space)
81902d09e03SGordon Ross  *	"foo/bar"
82002d09e03SGordon Ross  * The walk needs to skip "foo bar" and keep going until it finds
82102d09e03SGordon Ross  * something that doesn't match the "foo" name prefix.
82202d09e03SGordon Ross  */
82302d09e03SGordon Ross void
82402d09e03SGordon Ross smbfs_attrcache_prune(smbnode_t *top_np)
82502d09e03SGordon Ross {
82602d09e03SGordon Ross 	smbmntinfo_t *mi;
82702d09e03SGordon Ross 	smbnode_t *np;
82802d09e03SGordon Ross 	char *rpath;
82902d09e03SGordon Ross 	int rplen;
83002d09e03SGordon Ross 
83102d09e03SGordon Ross 	mi = top_np->n_mount;
83202d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_READER);
83302d09e03SGordon Ross 
83402d09e03SGordon Ross 	np = top_np;
83502d09e03SGordon Ross 	rpath = top_np->n_rpath;
83602d09e03SGordon Ross 	rplen = top_np->n_rplen;
83702d09e03SGordon Ross 	for (;;) {
83802d09e03SGordon Ross 		np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER);
83902d09e03SGordon Ross 		if (np == NULL)
84002d09e03SGordon Ross 			break;
84102d09e03SGordon Ross 		if (np->n_rplen < rplen)
84202d09e03SGordon Ross 			break;
84302d09e03SGordon Ross 		if (0 != strncmp(np->n_rpath, rpath, rplen))
84402d09e03SGordon Ross 			break;
84502d09e03SGordon Ross 		if (np->n_rplen > rplen && (
84602d09e03SGordon Ross 		    np->n_rpath[rplen] == ':' ||
84702d09e03SGordon Ross 		    np->n_rpath[rplen] == '\\'))
84802d09e03SGordon Ross 			smbfs_attrcache_remove(np);
8494bff34e3Sthurlow 	}
85002d09e03SGordon Ross 
85102d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
8524bff34e3Sthurlow }
8534bff34e3Sthurlow 
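/*
 * Usage sketch (illustrative, not a verbatim caller): after a
 * directory (or a file with named streams) is removed or renamed,
 * the caller invalidates everything cached under the old path:
 *
 *	smbfs_attrcache_prune(np);
 *
 * where np is the smbnode for the old (now stale) path.  The node
 * np itself is not touched; only nodes below it are invalidated.
 */
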
8544bff34e3Sthurlow #ifdef SMB_VNODE_DEBUG
85502d09e03SGordon Ross int smbfs_check_table_debug = 1;
8564bff34e3Sthurlow #else /* SMB_VNODE_DEBUG */
85702d09e03SGordon Ross int smbfs_check_table_debug = 0;
8584bff34e3Sthurlow #endif /* SMB_VNODE_DEBUG */
8594bff34e3Sthurlow 
8604bff34e3Sthurlow 
8614bff34e3Sthurlow /*
8624bff34e3Sthurlow  * Return non-zero if there is an active vnode belonging to this vfs in the
86302d09e03SGordon Ross  * smbnode cache.
8644bff34e3Sthurlow  *
8654bff34e3Sthurlow  * Several of these checks are done without holding the usual
86602d09e03SGordon Ross  * locks.  This is safe because smbfs_destroy_table(), smbfs_addfree(),
8674bff34e3Sthurlow  * etc. will redo the necessary checks before actually destroying
8684bff34e3Sthurlow  * any smbnodes.
8694bff34e3Sthurlow  *
8705f4fc069Sjilinxpd  * From NFS: nfs_subr.c:check_rtable
8714bff34e3Sthurlow  *
8724bff34e3Sthurlow  * Debugging changes here relative to NFS.
8734bff34e3Sthurlow  * Relatively harmless, so left 'em in.
8744bff34e3Sthurlow  */
8754bff34e3Sthurlow int
87602d09e03SGordon Ross smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
8774bff34e3Sthurlow {
87802d09e03SGordon Ross 	smbmntinfo_t *mi;
8794bff34e3Sthurlow 	smbnode_t *np;
8804bff34e3Sthurlow 	vnode_t *vp;
8814bff34e3Sthurlow 	int busycnt = 0;
8824bff34e3Sthurlow 
88302d09e03SGordon Ross 	mi = VFTOSMI(vfsp);
88402d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_READER);
88502d09e03SGordon Ross 	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
88602d09e03SGordon Ross 	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
8874bff34e3Sthurlow 
88802d09e03SGordon Ross 		if (np == rtnp)
88902d09e03SGordon Ross 			continue; /* skip the root */
89002d09e03SGordon Ross 		vp = SMBTOV(np);
8914bff34e3Sthurlow 
89202d09e03SGordon Ross 		/* Now the 'busy' checks: */
89302d09e03SGordon Ross 		/* Not on the free list? */
89402d09e03SGordon Ross 		if (np->r_freef == NULL) {
89502d09e03SGordon Ross 			SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n",
89602d09e03SGordon Ross 			    (void *)np, np->n_rpath);
89702d09e03SGordon Ross 			busycnt++;
89802d09e03SGordon Ross 		}
8994bff34e3Sthurlow 
90002d09e03SGordon Ross 		/* Has dirty pages? */
90102d09e03SGordon Ross 		if (vn_has_cached_data(vp) &&
90202d09e03SGordon Ross 		    (np->r_flags & RDIRTY)) {
90302d09e03SGordon Ross 			SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n",
90402d09e03SGordon Ross 			    (void *)np, np->n_rpath);
90502d09e03SGordon Ross 			busycnt++;
90602d09e03SGordon Ross 		}
9074bff34e3Sthurlow 
90802d09e03SGordon Ross 		/* Other refs? (not reflected in v_count) */
90902d09e03SGordon Ross 		if (np->r_count > 0) {
91002d09e03SGordon Ross 			SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
91102d09e03SGordon Ross 			    (void *)np, np->n_rpath);
91202d09e03SGordon Ross 			busycnt++;
9134bff34e3Sthurlow 		}
91402d09e03SGordon Ross 
91502d09e03SGordon Ross 		if (busycnt && !smbfs_check_table_debug)
91602d09e03SGordon Ross 			break;
91702d09e03SGordon Ross 
9184bff34e3Sthurlow 	}
91902d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
92002d09e03SGordon Ross 
9214bff34e3Sthurlow 	return (busycnt);
9224bff34e3Sthurlow }
9234bff34e3Sthurlow 
9244bff34e3Sthurlow /*
92502d09e03SGordon Ross  * Destroy inactive vnodes from the AVL tree which belong to this
9264bff34e3Sthurlow  * vfs.  It is essential that we destroy all inactive vnodes during a
9274bff34e3Sthurlow  * forced unmount as well as during a normal unmount.
9284bff34e3Sthurlow  *
9295f4fc069Sjilinxpd  * Based on NFS: nfs_subr.c:destroy_rtable
93002d09e03SGordon Ross  *
93102d09e03SGordon Ross  * In here, we're normally destroying all or most of the AVL tree,
93202d09e03SGordon Ross  * so the natural choice is to use avl_destroy_nodes.  However,
93302d09e03SGordon Ross  * there may be a few busy nodes that should remain in the AVL
93402d09e03SGordon Ross  * tree when we're done.  The solution: use a temporary tree to
93502d09e03SGordon Ross  * hold the busy nodes until we're done destroying the old tree,
93602d09e03SGordon Ross  * then copy the temporary tree over the (now empty) real tree.
9374bff34e3Sthurlow  */
9384bff34e3Sthurlow void
9394bff34e3Sthurlow smbfs_destroy_table(struct vfs *vfsp)
9404bff34e3Sthurlow {
94102d09e03SGordon Ross 	avl_tree_t tmp_avl;
94202d09e03SGordon Ross 	smbmntinfo_t *mi;
9434bff34e3Sthurlow 	smbnode_t *np;
9444bff34e3Sthurlow 	smbnode_t *rlist;
94502d09e03SGordon Ross 	void *v;
9464bff34e3Sthurlow 
94702d09e03SGordon Ross 	mi = VFTOSMI(vfsp);
9484bff34e3Sthurlow 	rlist = NULL;
94902d09e03SGordon Ross 	smbfs_init_hash_avl(&tmp_avl);
9504bff34e3Sthurlow 
95102d09e03SGordon Ross 	rw_enter(&mi->smi_hash_lk, RW_WRITER);
95202d09e03SGordon Ross 	v = NULL;
95302d09e03SGordon Ross 	while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {
95402d09e03SGordon Ross 
95502d09e03SGordon Ross 		mutex_enter(&smbfreelist_lock);
95602d09e03SGordon Ross 		if (np->r_freef == NULL) {
95702d09e03SGordon Ross 			/*
95802d09e03SGordon Ross 			 * Busy node (not on the free list).
95902d09e03SGordon Ross 			 * Will keep in the final AVL tree.
96002d09e03SGordon Ross 			 */
96102d09e03SGordon Ross 			mutex_exit(&smbfreelist_lock);
96202d09e03SGordon Ross 			avl_add(&tmp_avl, np);
96302d09e03SGordon Ross 		} else {
96402d09e03SGordon Ross 			/*
96502d09e03SGordon Ross 			 * It's on the free list.  Remove and
96602d09e03SGordon Ross 			 * arrange for it to be destroyed.
96702d09e03SGordon Ross 			 */
96802d09e03SGordon Ross 			sn_rmfree(np);
96902d09e03SGordon Ross 			mutex_exit(&smbfreelist_lock);
97002d09e03SGordon Ross 
97102d09e03SGordon Ross 			/*
97202d09e03SGordon Ross 			 * Last part of sn_rmhash_locked().
97302d09e03SGordon Ross 			 * NB: avl_destroy_nodes has already
97402d09e03SGordon Ross 			 * removed this from the "hash" AVL.
97502d09e03SGordon Ross 			 */
97602d09e03SGordon Ross 			mutex_enter(&np->r_statelock);
97702d09e03SGordon Ross 			np->r_flags &= ~RHASHED;
97802d09e03SGordon Ross 			mutex_exit(&np->r_statelock);
97902d09e03SGordon Ross 
98002d09e03SGordon Ross 			/*
98102d09e03SGordon Ross 			 * Add to the list of nodes to destroy.
98202d09e03SGordon Ross 			 * Borrowing avl_child[0] for this list.
98302d09e03SGordon Ross 			 */
98402d09e03SGordon Ross 			np->r_avl_node.avl_child[0] =
98502d09e03SGordon Ross 			    (struct avl_node *)rlist;
98602d09e03SGordon Ross 			rlist = np;
9874bff34e3Sthurlow 		}
9884bff34e3Sthurlow 	}
98902d09e03SGordon Ross 	avl_destroy(&mi->smi_hash_avl);
9904bff34e3Sthurlow 
99102d09e03SGordon Ross 	/*
99202d09e03SGordon Ross 	 * Replace the (now destroyed) "hash" AVL with the
99302d09e03SGordon Ross 	 * temporary AVL, which restores the busy nodes.
99402d09e03SGordon Ross 	 */
99502d09e03SGordon Ross 	mi->smi_hash_avl = tmp_avl;
99602d09e03SGordon Ross 	rw_exit(&mi->smi_hash_lk);
9974bff34e3Sthurlow 
99802d09e03SGordon Ross 	/*
99902d09e03SGordon Ross 	 * Now destroy the nodes on our temporary list (rlist).
100002d09e03SGordon Ross 	 * This call to smbfs_addfree will end up destroying the
100102d09e03SGordon Ross 	 * smbnode, but in a safe way with the appropriate set
100202d09e03SGordon Ross 	 * of checks done.
100302d09e03SGordon Ross 	 */
100402d09e03SGordon Ross 	while ((np = rlist) != NULL) {
100502d09e03SGordon Ross 		rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
100602d09e03SGordon Ross 		smbfs_addfree(np);
100702d09e03SGordon Ross 	}
10084bff34e3Sthurlow }
10094bff34e3Sthurlow 
10104bff34e3Sthurlow /*
10114bff34e3Sthurlow  * This routine destroys all the resources associated with the smbnode
101202d09e03SGordon Ross  * and then the smbnode itself.  Note: sn_inactive has been called.
10134bff34e3Sthurlow  *
10145f4fc069Sjilinxpd  * From NFS: nfs_subr.c:destroy_rnode
10154bff34e3Sthurlow  */
101602d09e03SGordon Ross static void
101702d09e03SGordon Ross sn_destroy_node(smbnode_t *np)
10184bff34e3Sthurlow {
10194bff34e3Sthurlow 	vnode_t *vp;
10204bff34e3Sthurlow 	vfs_t *vfsp;
10214bff34e3Sthurlow 
10224bff34e3Sthurlow 	vp = SMBTOV(np);
10234bff34e3Sthurlow 	vfsp = vp->v_vfsp;
10244bff34e3Sthurlow 
10254bff34e3Sthurlow 	ASSERT(vp->v_count == 1);
10264bff34e3Sthurlow 	ASSERT(np->r_count == 0);
10274bff34e3Sthurlow 	ASSERT(np->r_mapcnt == 0);
1028bd7c6f51SGordon Ross 	ASSERT(np->r_secattr.vsa_aclentp == NULL);
102902d09e03SGordon Ross 	ASSERT(np->r_cred == NULL);
103002d09e03SGordon Ross 	ASSERT(np->n_rpath == NULL);
10314bff34e3Sthurlow 	ASSERT(!(np->r_flags & RHASHED));
10324bff34e3Sthurlow 	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
10331a5e258fSJosef 'Jeff' Sipek 	atomic_dec_ulong((ulong_t *)&smbnodenew);
10344bff34e3Sthurlow 	vn_invalid(vp);
10354bff34e3Sthurlow 	vn_free(vp);
10364bff34e3Sthurlow 	kmem_cache_free(smbnode_cache, np);
10374bff34e3Sthurlow 	VFS_RELE(vfsp);
10384bff34e3Sthurlow }
10394bff34e3Sthurlow 
104002d09e03SGordon Ross /*
10415f4fc069Sjilinxpd  * From NFS rflush()
104202d09e03SGordon Ross  * Flush all vnodes in this (or every) vfs.
10435f4fc069Sjilinxpd  * Used by smbfs_sync and by smbfs_unmount.
104402d09e03SGordon Ross  */
104502d09e03SGordon Ross /*ARGSUSED*/
104602d09e03SGordon Ross void
104702d09e03SGordon Ross smbfs_rflush(struct vfs *vfsp, cred_t *cr)
104802d09e03SGordon Ross {
10495f4fc069Sjilinxpd 	smbmntinfo_t *mi;
10505f4fc069Sjilinxpd 	smbnode_t *np;
10515f4fc069Sjilinxpd 	vnode_t *vp, **vplist;
10525f4fc069Sjilinxpd 	long num, cnt;
10535f4fc069Sjilinxpd 
10545f4fc069Sjilinxpd 	mi = VFTOSMI(vfsp);
10555f4fc069Sjilinxpd 
10565f4fc069Sjilinxpd 	/*
10575f4fc069Sjilinxpd 	 * Check to see whether there is anything to do.
10585f4fc069Sjilinxpd 	 */
10595f4fc069Sjilinxpd 	num = avl_numnodes(&mi->smi_hash_avl);
10605f4fc069Sjilinxpd 	if (num == 0)
10615f4fc069Sjilinxpd 		return;
10625f4fc069Sjilinxpd 
10635f4fc069Sjilinxpd 	/*
10645f4fc069Sjilinxpd 	 * Allocate a slot for all currently active smbnodes on the
10655f4fc069Sjilinxpd 	 * supposition that they all may need flushing.
10665f4fc069Sjilinxpd 	 */
10675f4fc069Sjilinxpd 	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
10685f4fc069Sjilinxpd 	cnt = 0;
10695f4fc069Sjilinxpd 
10705f4fc069Sjilinxpd 	/*
10715f4fc069Sjilinxpd 	 * Walk the AVL tree looking for smbnodes with page
10725f4fc069Sjilinxpd 	 * lists associated with them.  Make a list of these
10735f4fc069Sjilinxpd 	 * files.
10745f4fc069Sjilinxpd 	 */
10755f4fc069Sjilinxpd 	rw_enter(&mi->smi_hash_lk, RW_READER);
10765f4fc069Sjilinxpd 	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
10775f4fc069Sjilinxpd 	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {
10785f4fc069Sjilinxpd 		vp = SMBTOV(np);
10795f4fc069Sjilinxpd 		/*
10805f4fc069Sjilinxpd 		 * Don't bother sync'ing a vp if it
10815f4fc069Sjilinxpd 		 * is part of virtual swap device or
10825f4fc069Sjilinxpd 		 * if VFS is read-only
10835f4fc069Sjilinxpd 		 */
10845f4fc069Sjilinxpd 		if (IS_SWAPVP(vp) || vn_is_readonly(vp))
10855f4fc069Sjilinxpd 			continue;
10865f4fc069Sjilinxpd 		/*
10875f4fc069Sjilinxpd 		 * If the vnode has pages and is marked as either
10885f4fc069Sjilinxpd 		 * dirty or mmap'd, hold and add this vnode to the
10895f4fc069Sjilinxpd 		 * list of vnodes to flush.
10905f4fc069Sjilinxpd 		 */
10915f4fc069Sjilinxpd 		if (vn_has_cached_data(vp) &&
10925f4fc069Sjilinxpd 		    ((np->r_flags & RDIRTY) || np->r_mapcnt > 0)) {
10935f4fc069Sjilinxpd 			VN_HOLD(vp);
10945f4fc069Sjilinxpd 			vplist[cnt++] = vp;
10955f4fc069Sjilinxpd 			if (cnt == num)
10965f4fc069Sjilinxpd 				break;
10975f4fc069Sjilinxpd 		}
10985f4fc069Sjilinxpd 	}
10995f4fc069Sjilinxpd 	rw_exit(&mi->smi_hash_lk);
11005f4fc069Sjilinxpd 
11015f4fc069Sjilinxpd 	/*
11025f4fc069Sjilinxpd 	 * Flush and release all of the files on the list.
11035f4fc069Sjilinxpd 	 */
11045f4fc069Sjilinxpd 	while (cnt-- > 0) {
11055f4fc069Sjilinxpd 		vp = vplist[cnt];
11065f4fc069Sjilinxpd 		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
11075f4fc069Sjilinxpd 		VN_RELE(vp);
11085f4fc069Sjilinxpd 	}
11095f4fc069Sjilinxpd 
11105f4fc069Sjilinxpd 	kmem_free(vplist, num * sizeof (vnode_t *));
111102d09e03SGordon Ross }
111202d09e03SGordon Ross 
11135f4fc069Sjilinxpd /* Here NFS has access cache stuff (nfs_subr.c) not used here */
11145f4fc069Sjilinxpd 
11155f4fc069Sjilinxpd /*
11165f4fc069Sjilinxpd  * Set or Clear direct I/O flag
11175f4fc069Sjilinxpd  * VOP_RWLOCK() is held for write access to prevent a race condition
11185f4fc069Sjilinxpd  * which would occur if a process is in the middle of a write when
11195f4fc069Sjilinxpd  * directio flag gets set. It is possible that all pages may not get flushed.
11205f4fc069Sjilinxpd  * From nfs_common.c
11215f4fc069Sjilinxpd  */
11225f4fc069Sjilinxpd 
11235f4fc069Sjilinxpd /* ARGSUSED */
11245f4fc069Sjilinxpd int
11255f4fc069Sjilinxpd smbfs_directio(vnode_t *vp, int cmd, cred_t *cr)
11265f4fc069Sjilinxpd {
11275f4fc069Sjilinxpd 	int	error = 0;
11285f4fc069Sjilinxpd 	smbnode_t	*np;
11295f4fc069Sjilinxpd 
11305f4fc069Sjilinxpd 	np = VTOSMB(vp);
11315f4fc069Sjilinxpd 
11325f4fc069Sjilinxpd 	if (cmd == DIRECTIO_ON) {
11335f4fc069Sjilinxpd 
11345f4fc069Sjilinxpd 		if (np->r_flags & RDIRECTIO)
11355f4fc069Sjilinxpd 			return (0);
11365f4fc069Sjilinxpd 
11375f4fc069Sjilinxpd 		/*
11385f4fc069Sjilinxpd 		 * Flush the page cache.
11395f4fc069Sjilinxpd 		 */
11405f4fc069Sjilinxpd 
11415f4fc069Sjilinxpd 		(void) VOP_RWLOCK(vp, V_WRITELOCK_TRUE, NULL);
11425f4fc069Sjilinxpd 
11435f4fc069Sjilinxpd 		if (np->r_flags & RDIRECTIO) {
11445f4fc069Sjilinxpd 			VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
11455f4fc069Sjilinxpd 			return (0);
11465f4fc069Sjilinxpd 		}
11475f4fc069Sjilinxpd 
11485f4fc069Sjilinxpd 		/* Here NFS also checks ->r_awcount */
11495f4fc069Sjilinxpd 		if (vn_has_cached_data(vp) &&
11505f4fc069Sjilinxpd 		    (np->r_flags & RDIRTY) != 0) {
11515f4fc069Sjilinxpd 			error = VOP_PUTPAGE(vp, (offset_t)0, (uint_t)0,
11525f4fc069Sjilinxpd 			    B_INVAL, cr, NULL);
11535f4fc069Sjilinxpd 			if (error) {
11545f4fc069Sjilinxpd 				if (error == ENOSPC || error == EDQUOT) {
11555f4fc069Sjilinxpd 					mutex_enter(&np->r_statelock);
11565f4fc069Sjilinxpd 					if (!np->r_error)
11575f4fc069Sjilinxpd 						np->r_error = error;
11585f4fc069Sjilinxpd 					mutex_exit(&np->r_statelock);
11595f4fc069Sjilinxpd 				}
11605f4fc069Sjilinxpd 				VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
11615f4fc069Sjilinxpd 				return (error);
11625f4fc069Sjilinxpd 			}
11635f4fc069Sjilinxpd 		}
11645f4fc069Sjilinxpd 
11655f4fc069Sjilinxpd 		mutex_enter(&np->r_statelock);
11665f4fc069Sjilinxpd 		np->r_flags |= RDIRECTIO;
11675f4fc069Sjilinxpd 		mutex_exit(&np->r_statelock);
11685f4fc069Sjilinxpd 		VOP_RWUNLOCK(vp, V_WRITELOCK_TRUE, NULL);
11695f4fc069Sjilinxpd 		return (0);
11705f4fc069Sjilinxpd 	}
11715f4fc069Sjilinxpd 
11725f4fc069Sjilinxpd 	if (cmd == DIRECTIO_OFF) {
11735f4fc069Sjilinxpd 		mutex_enter(&np->r_statelock);
11745f4fc069Sjilinxpd 		np->r_flags &= ~RDIRECTIO;	/* disable direct mode */
11755f4fc069Sjilinxpd 		mutex_exit(&np->r_statelock);
11765f4fc069Sjilinxpd 		return (0);
11775f4fc069Sjilinxpd 	}
11785f4fc069Sjilinxpd 
11795f4fc069Sjilinxpd 	return (EINVAL);
11805f4fc069Sjilinxpd }
1181ff1e230cSjilinxpd 
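/*
 * Usage sketch (illustrative): this is typically reached through
 * an ioctl (e.g. _FIODIRECTIO -- an assumption here; see the vnode
 * ops for the actual dispatch):
 *
 *	error = smbfs_directio(vp, DIRECTIO_ON, cr);
 *
 * After DIRECTIO_ON succeeds, cached pages have been flushed and
 * later reads/writes on vp bypass the page cache until a
 * DIRECTIO_OFF request clears the flag again.
 */
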
1182ff1e230cSjilinxpd static kmutex_t smbfs_newnum_lock;
1183ff1e230cSjilinxpd static uint32_t smbfs_newnum_val = 0;
1184ff1e230cSjilinxpd 
1185ff1e230cSjilinxpd /*
1186ff1e230cSjilinxpd  * Return a number 0..0xffffffff that's different from the last
1187ff1e230cSjilinxpd  * 0xffffffff numbers this returned.  Used for unlinked files.
11885f4fc069Sjilinxpd  * From NFS nfs_subr.c newnum
1189ff1e230cSjilinxpd  */
1190ff1e230cSjilinxpd uint32_t
1191ff1e230cSjilinxpd smbfs_newnum(void)
1192ff1e230cSjilinxpd {
1193ff1e230cSjilinxpd 	uint32_t id;
1194ff1e230cSjilinxpd 
1195ff1e230cSjilinxpd 	mutex_enter(&smbfs_newnum_lock);
1196ff1e230cSjilinxpd 	if (smbfs_newnum_val == 0)
1197ff1e230cSjilinxpd 		smbfs_newnum_val = (uint32_t)gethrestime_sec();
1198ff1e230cSjilinxpd 	id = smbfs_newnum_val++;
1199ff1e230cSjilinxpd 	mutex_exit(&smbfs_newnum_lock);
1200ff1e230cSjilinxpd 	return (id);
1201ff1e230cSjilinxpd }
1202ff1e230cSjilinxpd 
1203ff1e230cSjilinxpd /*
1204ff1e230cSjilinxpd  * Fill in a temporary name at buf
1205ff1e230cSjilinxpd  */
1206ff1e230cSjilinxpd int
1207ff1e230cSjilinxpd smbfs_newname(char *buf, size_t buflen)
1208ff1e230cSjilinxpd {
1209ff1e230cSjilinxpd 	uint_t id;
1210ff1e230cSjilinxpd 	int n;
1211ff1e230cSjilinxpd 
1212ff1e230cSjilinxpd 	id = smbfs_newnum();
1213ff1e230cSjilinxpd 	n = snprintf(buf, buflen, "~$smbfs%08X", id);
1214ff1e230cSjilinxpd 	return (n);
1215ff1e230cSjilinxpd }
1216ff1e230cSjilinxpd 
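/*
 * Example (illustrative; the caller behavior described is an
 * assumption): when removing a file that still has local users,
 * a temporary name can be generated like this:
 *
 *	char tmpname[32];
 *
 *	(void) smbfs_newname(tmpname, sizeof (tmpname));
 *	(tmpname now holds something like "~$smbfs0005B8D1")
 *
 * The hex digits come from smbfs_newnum() above.
 */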
12174bff34e3Sthurlow 
12184bff34e3Sthurlow /*
12194bff34e3Sthurlow  * initialize resources that are used by smbfs_subr.c
12204bff34e3Sthurlow  * this is called from the _init() routine (by way of smbfs_clntinit())
12214bff34e3Sthurlow  *
12225f4fc069Sjilinxpd  * From NFS: nfs_subr.c:nfs_subrinit
12234bff34e3Sthurlow  */
12244bff34e3Sthurlow int
12254bff34e3Sthurlow smbfs_subrinit(void)
12264bff34e3Sthurlow {
12274bff34e3Sthurlow 	ulong_t nsmbnode_max;
12284bff34e3Sthurlow 
12294bff34e3Sthurlow 	/*
123002d09e03SGordon Ross 	 * Allocate and initialize the smbnode cache
12314bff34e3Sthurlow 	 */
12324bff34e3Sthurlow 	if (nsmbnode <= 0)
12334bff34e3Sthurlow 		nsmbnode = ncsize; /* dnlc.h */
12344bff34e3Sthurlow 	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
12354bff34e3Sthurlow 	    sizeof (struct smbnode));
12364bff34e3Sthurlow 	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
12378329232eSGordon Ross 		cmn_err(CE_NOTE,
12384bff34e3Sthurlow 		    "setting nsmbnode to max value of %ld", nsmbnode_max);
12394bff34e3Sthurlow 		nsmbnode = nsmbnode_max;
12404bff34e3Sthurlow 	}
12414bff34e3Sthurlow 
12424bff34e3Sthurlow 	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
12434bff34e3Sthurlow 	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);
12444bff34e3Sthurlow 
12454bff34e3Sthurlow 	/*
12464bff34e3Sthurlow 	 * Initialize the various mutexes and reader/writer locks
12474bff34e3Sthurlow 	 */
12484bff34e3Sthurlow 	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
12494bff34e3Sthurlow 	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);
12504bff34e3Sthurlow 
12514bff34e3Sthurlow 	/*
12524bff34e3Sthurlow 	 * Assign unique major number for all smbfs mounts
12534bff34e3Sthurlow 	 */
12544bff34e3Sthurlow 	if ((smbfs_major = getudev()) == -1) {
12558329232eSGordon Ross 		cmn_err(CE_WARN,
12564bff34e3Sthurlow 		    "smbfs: init: can't get unique device number");
12574bff34e3Sthurlow 		smbfs_major = 0;
12584bff34e3Sthurlow 	}
12594bff34e3Sthurlow 	smbfs_minor = 0;
12604bff34e3Sthurlow 
12614bff34e3Sthurlow 	return (0);
12624bff34e3Sthurlow }
12634bff34e3Sthurlow 
12644bff34e3Sthurlow /*
12654bff34e3Sthurlow  * free smbfs hash table, etc.
12665f4fc069Sjilinxpd  * From NFS: nfs_subr.c:nfs_subrfini
12674bff34e3Sthurlow  */
12684bff34e3Sthurlow void
12694bff34e3Sthurlow smbfs_subrfini(void)
12704bff34e3Sthurlow {
12714bff34e3Sthurlow 
12724bff34e3Sthurlow 	/*
127302d09e03SGordon Ross 	 * Destroy the smbnode cache
12744bff34e3Sthurlow 	 */
12754bff34e3Sthurlow 	kmem_cache_destroy(smbnode_cache);
12764bff34e3Sthurlow 
12774bff34e3Sthurlow 	/*
12784bff34e3Sthurlow 	 * Destroy the various mutexes and reader/writer locks
12794bff34e3Sthurlow 	 */
12804bff34e3Sthurlow 	mutex_destroy(&smbfreelist_lock);
12814bff34e3Sthurlow 	mutex_destroy(&smbfs_minor_lock);
12824bff34e3Sthurlow }
12834bff34e3Sthurlow 
12844bff34e3Sthurlow /* rddir_cache ? */
12854bff34e3Sthurlow 
12864bff34e3Sthurlow /*
12874bff34e3Sthurlow  * Support functions for smbfs_kmem_reclaim
12884bff34e3Sthurlow  */
12894bff34e3Sthurlow 
129002d09e03SGordon Ross static void
12914bff34e3Sthurlow smbfs_node_reclaim(void)
12924bff34e3Sthurlow {
129302d09e03SGordon Ross 	smbmntinfo_t *mi;
12944bff34e3Sthurlow 	smbnode_t *np;
12954bff34e3Sthurlow 	vnode_t *vp;
12964bff34e3Sthurlow 
12974bff34e3Sthurlow 	mutex_enter(&smbfreelist_lock);
12984bff34e3Sthurlow 	while ((np = smbfreelist) != NULL) {
129902d09e03SGordon Ross 		sn_rmfree(np);
13004bff34e3Sthurlow 		mutex_exit(&smbfreelist_lock);
13014bff34e3Sthurlow 		if (np->r_flags & RHASHED) {
13024bff34e3Sthurlow 			vp = SMBTOV(np);
130302d09e03SGordon Ross 			mi = np->n_mount;
130402d09e03SGordon Ross 			rw_enter(&mi->smi_hash_lk, RW_WRITER);
13054bff34e3Sthurlow 			mutex_enter(&vp->v_lock);
13064bff34e3Sthurlow 			if (vp->v_count > 1) {
1307ade42b55SSebastien Roy 				VN_RELE_LOCKED(vp);
13084bff34e3Sthurlow 				mutex_exit(&vp->v_lock);
130902d09e03SGordon Ross 				rw_exit(&mi->smi_hash_lk);
13104bff34e3Sthurlow 				mutex_enter(&smbfreelist_lock);
13114bff34e3Sthurlow 				continue;
13124bff34e3Sthurlow 			}
13134bff34e3Sthurlow 			mutex_exit(&vp->v_lock);
131402d09e03SGordon Ross 			sn_rmhash_locked(np);
131502d09e03SGordon Ross 			rw_exit(&mi->smi_hash_lk);
13164bff34e3Sthurlow 		}
13174bff34e3Sthurlow 		/*
131802d09e03SGordon Ross 		 * This call to smbfs_addfree will end up destroying the
13194bff34e3Sthurlow 		 * smbnode, but in a safe way with the appropriate set
13204bff34e3Sthurlow 		 * of checks done.
13214bff34e3Sthurlow 		 */
132202d09e03SGordon Ross 		smbfs_addfree(np);
13234bff34e3Sthurlow 		mutex_enter(&smbfreelist_lock);
13244bff34e3Sthurlow 	}
13254bff34e3Sthurlow 	mutex_exit(&smbfreelist_lock);
13264bff34e3Sthurlow }
13274bff34e3Sthurlow 
13284bff34e3Sthurlow /*
13294bff34e3Sthurlow  * Called by the kmem subsystem to ask us if we could
13304bff34e3Sthurlow  * "Please give back some memory!"
13314bff34e3Sthurlow  *
13324bff34e3Sthurlow  * Todo: dump nodes from the free list?
13334bff34e3Sthurlow  */
13344bff34e3Sthurlow /*ARGSUSED*/
13354bff34e3Sthurlow void
13364bff34e3Sthurlow smbfs_kmem_reclaim(void *cdrarg)
13374bff34e3Sthurlow {
133802d09e03SGordon Ross 	smbfs_node_reclaim();
13394bff34e3Sthurlow }
13404bff34e3Sthurlow 
13415f4fc069Sjilinxpd /*
13425f4fc069Sjilinxpd  * Here NFS has failover stuff and
13435f4fc069Sjilinxpd  * nfs_rw_xxx - see smbfs_rwlock.c
13445f4fc069Sjilinxpd  */