xref: /illumos-gate/usr/src/uts/common/fs/nfs/nfs4_rnode.c (revision bbf2a467)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
59f9e2373Sjwahlig  * Common Development and Distribution License (the "License").
69f9e2373Sjwahlig  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*bbf2a467SNagakiran Rajashekar  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate /*
277c478bd9Sstevel@tonic-gate  *  	Copyright (c) 1983,1984,1985,1986,1987,1988,1989  AT&T.
287c478bd9Sstevel@tonic-gate  *	All Rights Reserved
297c478bd9Sstevel@tonic-gate  */
307c478bd9Sstevel@tonic-gate 
317c478bd9Sstevel@tonic-gate 
327c478bd9Sstevel@tonic-gate #include <sys/param.h>
337c478bd9Sstevel@tonic-gate #include <sys/types.h>
347c478bd9Sstevel@tonic-gate #include <sys/systm.h>
357c478bd9Sstevel@tonic-gate #include <sys/cred.h>
367c478bd9Sstevel@tonic-gate #include <sys/proc.h>
377c478bd9Sstevel@tonic-gate #include <sys/user.h>
387c478bd9Sstevel@tonic-gate #include <sys/time.h>
397c478bd9Sstevel@tonic-gate #include <sys/buf.h>
407c478bd9Sstevel@tonic-gate #include <sys/vfs.h>
417c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
427c478bd9Sstevel@tonic-gate #include <sys/socket.h>
437c478bd9Sstevel@tonic-gate #include <sys/uio.h>
447c478bd9Sstevel@tonic-gate #include <sys/tiuser.h>
457c478bd9Sstevel@tonic-gate #include <sys/swap.h>
467c478bd9Sstevel@tonic-gate #include <sys/errno.h>
477c478bd9Sstevel@tonic-gate #include <sys/debug.h>
487c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
497c478bd9Sstevel@tonic-gate #include <sys/kstat.h>
507c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
517c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
527c478bd9Sstevel@tonic-gate #include <sys/session.h>
537c478bd9Sstevel@tonic-gate #include <sys/dnlc.h>
547c478bd9Sstevel@tonic-gate #include <sys/bitmap.h>
557c478bd9Sstevel@tonic-gate #include <sys/acl.h>
567c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
577c478bd9Sstevel@tonic-gate #include <sys/pathname.h>
587c478bd9Sstevel@tonic-gate #include <sys/flock.h>
597c478bd9Sstevel@tonic-gate #include <sys/dirent.h>
607c478bd9Sstevel@tonic-gate #include <sys/flock.h>
617c478bd9Sstevel@tonic-gate #include <sys/callb.h>
627c478bd9Sstevel@tonic-gate 
637c478bd9Sstevel@tonic-gate #include <rpc/types.h>
647c478bd9Sstevel@tonic-gate #include <rpc/xdr.h>
657c478bd9Sstevel@tonic-gate #include <rpc/auth.h>
667c478bd9Sstevel@tonic-gate #include <rpc/rpcsec_gss.h>
677c478bd9Sstevel@tonic-gate #include <rpc/clnt.h>
687c478bd9Sstevel@tonic-gate 
697c478bd9Sstevel@tonic-gate #include <nfs/nfs.h>
707c478bd9Sstevel@tonic-gate #include <nfs/nfs_clnt.h>
717c478bd9Sstevel@tonic-gate #include <nfs/nfs_acl.h>
727c478bd9Sstevel@tonic-gate 
737c478bd9Sstevel@tonic-gate #include <nfs/nfs4.h>
747c478bd9Sstevel@tonic-gate #include <nfs/rnode4.h>
757c478bd9Sstevel@tonic-gate #include <nfs/nfs4_clnt.h>
767c478bd9Sstevel@tonic-gate 
777c478bd9Sstevel@tonic-gate /*
787c478bd9Sstevel@tonic-gate  * The hash queues for the access to active and cached rnodes
797c478bd9Sstevel@tonic-gate  * are organized as doubly linked lists.  A reader/writer lock
807c478bd9Sstevel@tonic-gate  * for each hash bucket is used to control access and to synchronize
817c478bd9Sstevel@tonic-gate  * lookups, additions, and deletions from the hash queue.
827c478bd9Sstevel@tonic-gate  *
837c478bd9Sstevel@tonic-gate  * The rnode freelist is organized as a doubly linked list with
847c478bd9Sstevel@tonic-gate  * a head pointer.  Additions and deletions are synchronized via
857c478bd9Sstevel@tonic-gate  * a single mutex.
867c478bd9Sstevel@tonic-gate  *
877c478bd9Sstevel@tonic-gate  * In order to add an rnode to the free list, it must be hashed into
887c478bd9Sstevel@tonic-gate  * a hash queue and the exclusive lock to the hash queue be held.
897c478bd9Sstevel@tonic-gate  * If an rnode is not hashed into a hash queue, then it is destroyed
907c478bd9Sstevel@tonic-gate  * because it represents no valuable information that can be reused
917c478bd9Sstevel@tonic-gate  * about the file.  The exclusive lock to the hash queue must be
927c478bd9Sstevel@tonic-gate  * held in order to prevent a lookup in the hash queue from finding
937c478bd9Sstevel@tonic-gate  * the rnode and using it and assuming that the rnode is not on the
947c478bd9Sstevel@tonic-gate  * freelist.  The lookup in the hash queue will have the hash queue
957c478bd9Sstevel@tonic-gate  * locked, either exclusive or shared.
967c478bd9Sstevel@tonic-gate  *
977c478bd9Sstevel@tonic-gate  * The vnode reference count for each rnode is not allowed to drop
987c478bd9Sstevel@tonic-gate  * below 1.  This prevents external entities, such as the VM
997c478bd9Sstevel@tonic-gate  * subsystem, from acquiring references to vnodes already on the
1007c478bd9Sstevel@tonic-gate  * freelist and then trying to place them back on the freelist
1017c478bd9Sstevel@tonic-gate  * when their reference is released.  This means that when an
1027c478bd9Sstevel@tonic-gate  * rnode is looked up in the hash queues, then either the rnode
103da6c28aaSamw  * is removed from the freelist and that reference is transferred to
1047c478bd9Sstevel@tonic-gate  * the new reference or the vnode reference count must be incremented
1057c478bd9Sstevel@tonic-gate  * accordingly.  The mutex for the freelist must be held in order to
1067c478bd9Sstevel@tonic-gate  * accurately test to see if the rnode is on the freelist or not.
1077c478bd9Sstevel@tonic-gate  * The hash queue lock might be held shared and it is possible that
1087c478bd9Sstevel@tonic-gate  * two different threads may race to remove the rnode from the
1097c478bd9Sstevel@tonic-gate  * freelist.  This race can be resolved by holding the mutex for the
1107c478bd9Sstevel@tonic-gate  * freelist.  Please note that the mutex for the freelist does not
1117c478bd9Sstevel@tonic-gate  * need to be held if the rnode is not on the freelist.  It can not be
1127c478bd9Sstevel@tonic-gate  * placed on the freelist due to the requirement that the thread
1137c478bd9Sstevel@tonic-gate  * putting the rnode on the freelist must hold the exclusive lock
1147c478bd9Sstevel@tonic-gate  * to the hash queue and the thread doing the lookup in the hash
1157c478bd9Sstevel@tonic-gate  * queue is holding either a shared or exclusive lock to the hash
1167c478bd9Sstevel@tonic-gate  * queue.
1177c478bd9Sstevel@tonic-gate  *
1187c478bd9Sstevel@tonic-gate  * The lock ordering is:
1197c478bd9Sstevel@tonic-gate  *
1207c478bd9Sstevel@tonic-gate  *	hash bucket lock -> vnode lock
1212d1fef97Ssamf  *	hash bucket lock -> freelist lock -> r_statelock
1227c478bd9Sstevel@tonic-gate  */
/* Global hash table of active/cached rnodes; one r4hashq_t bucket per chain. */
1237c478bd9Sstevel@tonic-gate r4hashq_t *rtable4;
1247c478bd9Sstevel@tonic-gate 
/* Protects rp4freelist (the rnode freelist) and rnode4_new. */
1257c478bd9Sstevel@tonic-gate static kmutex_t rp4freelist_lock;
/* Head of the doubly linked freelist of cached-but-inactive rnodes. */
1267c478bd9Sstevel@tonic-gate static rnode4_t *rp4freelist = NULL;
/* Count of rnodes allocated so far; compared against nrnode to decide reuse. */
1277c478bd9Sstevel@tonic-gate static long rnode4_new = 0;
/* Number of buckets in rtable4; rtable4mask is used as a bitmask in rtable4hash(). */
1287c478bd9Sstevel@tonic-gate int rtable4size;
1297c478bd9Sstevel@tonic-gate static int rtable4mask;
/* kmem cache from which rnode4_t structures are allocated. */
1307c478bd9Sstevel@tonic-gate static struct kmem_cache *rnode4_cache;
/* NOTE(review): presumably a target average hash-chain length used when sizing the table — confirm at init. */
1317c478bd9Sstevel@tonic-gate static int rnode4_hashlen = 4;
1327c478bd9Sstevel@tonic-gate 
/* Forward declarations of file-local helpers. */
1337c478bd9Sstevel@tonic-gate static void	r4inactive(rnode4_t *, cred_t *);
1347c478bd9Sstevel@tonic-gate static vnode_t	*make_rnode4(nfs4_sharedfh_t *, r4hashq_t *, struct vfs *,
1357c478bd9Sstevel@tonic-gate 		    struct vnodeops *,
1367c478bd9Sstevel@tonic-gate 		    int (*)(vnode_t *, page_t *, u_offset_t *, size_t *, int,
1377c478bd9Sstevel@tonic-gate 		    cred_t *),
1387c478bd9Sstevel@tonic-gate 		    int *, cred_t *);
1397c478bd9Sstevel@tonic-gate static void	rp4_rmfree(rnode4_t *);
1407c478bd9Sstevel@tonic-gate int		nfs4_free_data_reclaim(rnode4_t *);
1417c478bd9Sstevel@tonic-gate static int	nfs4_active_data_reclaim(rnode4_t *);
1427c478bd9Sstevel@tonic-gate static int	nfs4_free_reclaim(void);
1437c478bd9Sstevel@tonic-gate static int	nfs4_active_reclaim(void);
1447c478bd9Sstevel@tonic-gate static int	nfs4_rnode_reclaim(void);
1457c478bd9Sstevel@tonic-gate static void	nfs4_reclaim(void *);
1467c478bd9Sstevel@tonic-gate static int	isrootfh(nfs4_sharedfh_t *, rnode4_t *);
1477c478bd9Sstevel@tonic-gate static void	uninit_rnode4(rnode4_t *);
1487c478bd9Sstevel@tonic-gate static void	destroy_rnode4(rnode4_t *);
149b9238976Sth static void	r4_stub_set(rnode4_t *, nfs4_stub_type_t);
1507c478bd9Sstevel@tonic-gate 
/* Debug-only knobs and helpers; all default off. */
1517c478bd9Sstevel@tonic-gate #ifdef DEBUG
1527c478bd9Sstevel@tonic-gate static int r4_check_for_dups = 0; /* Flag to enable dup rnode detection. */
1537c478bd9Sstevel@tonic-gate static int nfs4_rnode_debug = 0;
1547c478bd9Sstevel@tonic-gate /* if nonzero, kmem_cache_free() rnodes rather than place on freelist */
1557c478bd9Sstevel@tonic-gate static int nfs4_rnode_nofreelist = 0;
1567c478bd9Sstevel@tonic-gate /* give messages on colliding shared filehandles */
1577c478bd9Sstevel@tonic-gate static void	r4_dup_check(rnode4_t *, vfs_t *);
1587c478bd9Sstevel@tonic-gate #endif
1597c478bd9Sstevel@tonic-gate 
1607c478bd9Sstevel@tonic-gate /*
1619f9e2373Sjwahlig  * If the vnode has pages, run the list and check for any that are
1629f9e2373Sjwahlig  * still dangling.  We call this routine before putting an rnode on
1639f9e2373Sjwahlig  * the free list.
1649f9e2373Sjwahlig  */
/*
 * Returns 1 if any page attached to vp still carries commit state
 * (p_fsdata != C_NOCOMMIT), 0 otherwise.  The per-vnode page mutex is
 * acquired and released internally; callers need no locks held.
 */
1659f9e2373Sjwahlig static int
1669f9e2373Sjwahlig nfs4_dross_pages(vnode_t *vp)
1679f9e2373Sjwahlig {
1689f9e2373Sjwahlig 	page_t *pp;
1699f9e2373Sjwahlig 	kmutex_t *vphm;
1709f9e2373Sjwahlig 
	/* v_pages is protected by the per-vnode page mutex. */
1719f9e2373Sjwahlig 	vphm = page_vnode_mutex(vp);
1729f9e2373Sjwahlig 	mutex_enter(vphm);
1739f9e2373Sjwahlig 	if ((pp = vp->v_pages) != NULL) {
1749f9e2373Sjwahlig 		do {
			/* A page whose p_fsdata isn't C_NOCOMMIT is still dangling. */
1759f9e2373Sjwahlig 			if (pp->p_fsdata != C_NOCOMMIT) {
1769f9e2373Sjwahlig 				mutex_exit(vphm);
1779f9e2373Sjwahlig 				return (1);
1789f9e2373Sjwahlig 			}
		/* The v_pages list is circular; stop once we wrap back to the head. */
1799f9e2373Sjwahlig 		} while ((pp = pp->p_vpnext) != vp->v_pages);
1809f9e2373Sjwahlig 	}
1819f9e2373Sjwahlig 	mutex_exit(vphm);
1829f9e2373Sjwahlig 
1839f9e2373Sjwahlig 	return (0);
1849f9e2373Sjwahlig }
1859f9e2373Sjwahlig 
1859f9e2373Sjwahlig 
1869f9e2373Sjwahlig /*
1879f9e2373Sjwahlig  * Flush any pages left on this rnode.
1887c478bd9Sstevel@tonic-gate  */
/*
 * Waits for all outstanding async activity on the rnode to drain
 * (r_count reaches 0), then pushes dirty pages to the server and
 * invalidates the vnode's page cache.  ENOSPC/EDQUOT write errors are
 * latched into r_error (first error wins) so a later operation can
 * report them to the application.
 */
1897c478bd9Sstevel@tonic-gate static void
1909f9e2373Sjwahlig r4flushpages(rnode4_t *rp, cred_t *cr)
1917c478bd9Sstevel@tonic-gate {
1927c478bd9Sstevel@tonic-gate 	vnode_t *vp;
1937c478bd9Sstevel@tonic-gate 	int error;
1947c478bd9Sstevel@tonic-gate 
1957c478bd9Sstevel@tonic-gate 	/*
1967c478bd9Sstevel@tonic-gate 	 * Before freeing anything, wait until all asynchronous
1977c478bd9Sstevel@tonic-gate 	 * activity is done on this rnode.  This will allow all
1987c478bd9Sstevel@tonic-gate 	 * asynchronous read ahead and write behind i/o's to
1997c478bd9Sstevel@tonic-gate 	 * finish.
2007c478bd9Sstevel@tonic-gate 	 */
2017c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2027c478bd9Sstevel@tonic-gate 	while (rp->r_count > 0)
2037c478bd9Sstevel@tonic-gate 		cv_wait(&rp->r_cv, &rp->r_statelock);
2047c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2057c478bd9Sstevel@tonic-gate 
2067c478bd9Sstevel@tonic-gate 	/*
2077c478bd9Sstevel@tonic-gate 	 * Flush and invalidate all pages associated with the vnode.
2087c478bd9Sstevel@tonic-gate 	 */
2097c478bd9Sstevel@tonic-gate 	vp = RTOV4(rp);
2107c478bd9Sstevel@tonic-gate 	if (nfs4_has_pages(vp)) {
2117c478bd9Sstevel@tonic-gate 		ASSERT(vp->v_type != VCHR);
2127c478bd9Sstevel@tonic-gate 		if ((rp->r_flags & R4DIRTY) && !rp->r_error) {
2137c478bd9Sstevel@tonic-gate 			error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr, NULL);
			/* Only latch "out of space" style errors; keep the first one seen. */
2147c478bd9Sstevel@tonic-gate 			if (error && (error == ENOSPC || error == EDQUOT)) {
2157c478bd9Sstevel@tonic-gate 				mutex_enter(&rp->r_statelock);
2167c478bd9Sstevel@tonic-gate 				if (!rp->r_error)
2177c478bd9Sstevel@tonic-gate 					rp->r_error = error;
2187c478bd9Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
2197c478bd9Sstevel@tonic-gate 			}
2207c478bd9Sstevel@tonic-gate 		}
2217c478bd9Sstevel@tonic-gate 		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
2227c478bd9Sstevel@tonic-gate 	}
2239f9e2373Sjwahlig }
2249f9e2373Sjwahlig 
2259f9e2373Sjwahlig /*
2269f9e2373Sjwahlig  * Free the resources associated with an rnode.
2279f9e2373Sjwahlig  */
/*
 * Flushes pages first, then releases every cache hung off the rnode:
 * access cache, readdir cache, symlink contents, cached ACL, and the
 * cached xattr directory vnode.  The cached pointers are snapshotted
 * and cleared under r_statelock, then freed after the lock is dropped
 * so no blocking free runs with the lock held.
 */
2289f9e2373Sjwahlig static void
2299f9e2373Sjwahlig r4inactive(rnode4_t *rp, cred_t *cr)
2309f9e2373Sjwahlig {
2319f9e2373Sjwahlig 	vnode_t *vp;
2329f9e2373Sjwahlig 	char *contents;
2339f9e2373Sjwahlig 	int size;
2349f9e2373Sjwahlig 	vsecattr_t *vsp;
2359f9e2373Sjwahlig 	vnode_t *xattr;
2369f9e2373Sjwahlig 
2379f9e2373Sjwahlig 	r4flushpages(rp, cr);
2389f9e2373Sjwahlig 
2399f9e2373Sjwahlig 	vp = RTOV4(rp);
2407c478bd9Sstevel@tonic-gate 
2417c478bd9Sstevel@tonic-gate 	/*
2427c478bd9Sstevel@tonic-gate 	 * Free any held caches which may be
2437c478bd9Sstevel@tonic-gate 	 * associated with this rnode.
2447c478bd9Sstevel@tonic-gate 	 */
	/* Detach the cached state under the lock; free it below without the lock. */
2457c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
2467c478bd9Sstevel@tonic-gate 	contents = rp->r_symlink.contents;
2477c478bd9Sstevel@tonic-gate 	size = rp->r_symlink.size;
2487c478bd9Sstevel@tonic-gate 	rp->r_symlink.contents = NULL;
2497c478bd9Sstevel@tonic-gate 	vsp = rp->r_secattr;
2507c478bd9Sstevel@tonic-gate 	rp->r_secattr = NULL;
2517c478bd9Sstevel@tonic-gate 	xattr = rp->r_xattr_dir;
2527c478bd9Sstevel@tonic-gate 	rp->r_xattr_dir = NULL;
2537c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
2547c478bd9Sstevel@tonic-gate 
2557c478bd9Sstevel@tonic-gate 	/*
2567c478bd9Sstevel@tonic-gate 	 * Free the access cache entries.
2577c478bd9Sstevel@tonic-gate 	 */
2587c478bd9Sstevel@tonic-gate 	(void) nfs4_access_purge_rp(rp);
2597c478bd9Sstevel@tonic-gate 
2607c478bd9Sstevel@tonic-gate 	/*
2617c478bd9Sstevel@tonic-gate 	 * Free the readdir cache entries.
2627c478bd9Sstevel@tonic-gate 	 */
2637c478bd9Sstevel@tonic-gate 	nfs4_purge_rddir_cache(vp);
2647c478bd9Sstevel@tonic-gate 
2657c478bd9Sstevel@tonic-gate 	/*
2667c478bd9Sstevel@tonic-gate 	 * Free the symbolic link cache.
2677c478bd9Sstevel@tonic-gate 	 */
2687c478bd9Sstevel@tonic-gate 	if (contents != NULL) {
2697c478bd9Sstevel@tonic-gate 
2707c478bd9Sstevel@tonic-gate 		kmem_free((void *)contents, size);
2717c478bd9Sstevel@tonic-gate 	}
2727c478bd9Sstevel@tonic-gate 
2737c478bd9Sstevel@tonic-gate 	/*
2747c478bd9Sstevel@tonic-gate 	 * Free any cached ACL.
2757c478bd9Sstevel@tonic-gate 	 */
2767c478bd9Sstevel@tonic-gate 	if (vsp != NULL)
2777c478bd9Sstevel@tonic-gate 		nfs4_acl_free_cache(vsp);
2787c478bd9Sstevel@tonic-gate 
2797c478bd9Sstevel@tonic-gate 	/*
2807c478bd9Sstevel@tonic-gate 	 * Release the cached xattr_dir
2817c478bd9Sstevel@tonic-gate 	 */
2827c478bd9Sstevel@tonic-gate 	if (xattr != NULL)
2837c478bd9Sstevel@tonic-gate 		VN_RELE(xattr);
2847c478bd9Sstevel@tonic-gate }
2857c478bd9Sstevel@tonic-gate 
2867c478bd9Sstevel@tonic-gate /*
2877c478bd9Sstevel@tonic-gate  * We have seen a case that the fh passed in is for "." which
2887c478bd9Sstevel@tonic-gate  * should be a VROOT node, however, the fh is different from the
2897c478bd9Sstevel@tonic-gate  * root fh stored in the mntinfo4_t. The invalid fh might be
2907c478bd9Sstevel@tonic-gate  * from a misbehaved server and will panic the client system at
2917c478bd9Sstevel@tonic-gate  * a later time. To avoid the panic, we drop the bad fh, use
2927c478bd9Sstevel@tonic-gate  * the root fh from mntinfo4_t, and print an error message
2937c478bd9Sstevel@tonic-gate  * for attention.
2947c478bd9Sstevel@tonic-gate  */
/*
 * Returns either the fh passed in (the normal case) or mi->mi_rootfh
 * when nm is "." and fh disagrees with the saved root filehandle.
 * *wasbad is set to 1 in the substitution case, 0 otherwise.  The
 * caller is still responsible for releasing the original fh.
 */
2957c478bd9Sstevel@tonic-gate nfs4_sharedfh_t *
2967c478bd9Sstevel@tonic-gate badrootfh_check(nfs4_sharedfh_t *fh, nfs4_fname_t *nm, mntinfo4_t *mi,
2977c478bd9Sstevel@tonic-gate     int *wasbad)
2987c478bd9Sstevel@tonic-gate {
2997c478bd9Sstevel@tonic-gate 	char *s;
3007c478bd9Sstevel@tonic-gate 
3017c478bd9Sstevel@tonic-gate 	*wasbad = 0;
3027c478bd9Sstevel@tonic-gate 	s = fn_name(nm);
3037c478bd9Sstevel@tonic-gate 	ASSERT(strcmp(s, "..") != 0);
3047c478bd9Sstevel@tonic-gate 
	/* Only "." can legitimately alias the root; compare against mi_rootfh. */
3057c478bd9Sstevel@tonic-gate 	if ((s[0] == '.' && s[1] == '\0') && fh &&
306b9238976Sth 	    !SFH4_SAME(mi->mi_rootfh, fh)) {
3077c478bd9Sstevel@tonic-gate #ifdef DEBUG
3087c478bd9Sstevel@tonic-gate 		nfs4_fhandle_t fhandle;
3097c478bd9Sstevel@tonic-gate 
3107c478bd9Sstevel@tonic-gate 		zcmn_err(mi->mi_zone->zone_id, CE_WARN,
3117c478bd9Sstevel@tonic-gate 		    "Server %s returns a different "
3127c478bd9Sstevel@tonic-gate 		    "root filehandle for the path %s:",
3137c478bd9Sstevel@tonic-gate 		    mi->mi_curr_serv->sv_hostname,
3147c478bd9Sstevel@tonic-gate 		    mi->mi_curr_serv->sv_path);
3157c478bd9Sstevel@tonic-gate 
3167c478bd9Sstevel@tonic-gate 		/* print the bad fh */
3177c478bd9Sstevel@tonic-gate 		fhandle.fh_len = fh->sfh_fh.nfs_fh4_len;
3187c478bd9Sstevel@tonic-gate 		bcopy(fh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
319b9238976Sth 		    fhandle.fh_len);
3207c478bd9Sstevel@tonic-gate 		nfs4_printfhandle(&fhandle);
3217c478bd9Sstevel@tonic-gate 
3227c478bd9Sstevel@tonic-gate 		/* print mi_rootfh */
3237c478bd9Sstevel@tonic-gate 		fhandle.fh_len = mi->mi_rootfh->sfh_fh.nfs_fh4_len;
3247c478bd9Sstevel@tonic-gate 		bcopy(mi->mi_rootfh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
325b9238976Sth 		    fhandle.fh_len);
3267c478bd9Sstevel@tonic-gate 		nfs4_printfhandle(&fhandle);
3277c478bd9Sstevel@tonic-gate #endif
3287c478bd9Sstevel@tonic-gate 		/* use mi_rootfh instead; fh will be rele by the caller */
3297c478bd9Sstevel@tonic-gate 		fh = mi->mi_rootfh;
3307c478bd9Sstevel@tonic-gate 		*wasbad = 1;
3317c478bd9Sstevel@tonic-gate 	}
3327c478bd9Sstevel@tonic-gate 
	/* fn_name() hands back a MAXNAMELEN buffer; free it here. */
3337c478bd9Sstevel@tonic-gate 	kmem_free(s, MAXNAMELEN);
3347c478bd9Sstevel@tonic-gate 	return (fh);
3357c478bd9Sstevel@tonic-gate }
3367c478bd9Sstevel@tonic-gate 
/*
 * Cache the attributes in garp (if any) on vp.
 *
 * Entered with the hash bucket lock rtable4[index].r_lock held; that
 * lock is dropped on every path out of this function, so the caller
 * must not reference it afterwards.  For a brand-new node (newnode
 * != 0) this also sets v_type/v_rdev and decides whether the rnode
 * is a mirror-mount stub by comparing the server fsid against the
 * fsid saved at mount time.
 */
3377c478bd9Sstevel@tonic-gate void
3387c478bd9Sstevel@tonic-gate r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
3397c478bd9Sstevel@tonic-gate     hrtime_t t, cred_t *cr, int index)
3407c478bd9Sstevel@tonic-gate {
341b9238976Sth 	int is_stub;
3427c478bd9Sstevel@tonic-gate 	vattr_t *attr;
3437c478bd9Sstevel@tonic-gate 	/*
3447c478bd9Sstevel@tonic-gate 	 * Don't add to attrcache if time overflow, but
3457c478bd9Sstevel@tonic-gate 	 * no need to check because either attr is null or the time
3467c478bd9Sstevel@tonic-gate 	 * values in it were processed by nfs4_time_ntov(), which checks
3477c478bd9Sstevel@tonic-gate 	 * for time overflows.
3487c478bd9Sstevel@tonic-gate 	 */
3497c478bd9Sstevel@tonic-gate 	attr = garp ? &garp->n4g_va : NULL;
3507c478bd9Sstevel@tonic-gate 
3517c478bd9Sstevel@tonic-gate 	if (attr) {
3527c478bd9Sstevel@tonic-gate 		if (!newnode) {
			/* Existing node: drop the bucket lock before caching. */
3537c478bd9Sstevel@tonic-gate 			rw_exit(&rtable4[index].r_lock);
3547c478bd9Sstevel@tonic-gate #ifdef DEBUG
3557c478bd9Sstevel@tonic-gate 			if (vp->v_type != attr->va_type &&
3567c478bd9Sstevel@tonic-gate 			    vp->v_type != VNON && attr->va_type != VNON) {
3577c478bd9Sstevel@tonic-gate 				zcmn_err(VTOMI4(vp)->mi_zone->zone_id, CE_WARN,
358b9238976Sth 				    "makenfs4node: type (%d) doesn't "
359b9238976Sth 				    "match type of found node at %p (%d)",
360b9238976Sth 				    attr->va_type, (void *)vp, vp->v_type);
3617c478bd9Sstevel@tonic-gate 			}
3627c478bd9Sstevel@tonic-gate #endif
3637c478bd9Sstevel@tonic-gate 			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
3647c478bd9Sstevel@tonic-gate 		} else {
3657c478bd9Sstevel@tonic-gate 			rnode4_t *rp = VTOR4(vp);
3667c478bd9Sstevel@tonic-gate 
3677c478bd9Sstevel@tonic-gate 			vp->v_type = attr->va_type;
3687c478bd9Sstevel@tonic-gate 			vp->v_rdev = attr->va_rdev;
3697c478bd9Sstevel@tonic-gate 
3707c478bd9Sstevel@tonic-gate 			/*
3717c478bd9Sstevel@tonic-gate 			 * Turn this object into a "stub" object if we
372b9238976Sth 			 * crossed an underlying server fs boundary.
373b9238976Sth 			 * To make this check, during mount we save the
3747c478bd9Sstevel@tonic-gate 			 * fsid of the server object being mounted.
3757c478bd9Sstevel@tonic-gate 			 * Here we compare this object's server fsid
3767c478bd9Sstevel@tonic-gate 			 * with the fsid we saved at mount.  If they
3777c478bd9Sstevel@tonic-gate 			 * are different, we crossed server fs boundary.
3787c478bd9Sstevel@tonic-gate 			 *
379b9238976Sth 			 * The stub type is set (or not) at rnode
3807c478bd9Sstevel@tonic-gate 			 * creation time and it never changes for life
381b9238976Sth 			 * of the rnode.
3827c478bd9Sstevel@tonic-gate 			 *
383b9238976Sth 			 * The stub type is also set during RO failover,
384b9238976Sth 			 * nfs4_remap_file().
385b9238976Sth 			 *
386b9238976Sth 			 * This stub will be for a mirror-mount.
387b9238976Sth 			 *
388b9238976Sth 			 * We don't bother with taking r_state_lock to
389b9238976Sth 			 * set the stub type because this is a new rnode
390b9238976Sth 			 * and we're holding the hash bucket r_lock RW_WRITER.
391b9238976Sth 			 * No other thread could have obtained access
392b9238976Sth 			 * to this rnode.
3937c478bd9Sstevel@tonic-gate 			 */
394b9238976Sth 			is_stub = 0;
3957c478bd9Sstevel@tonic-gate 			if (garp->n4g_fsid_valid) {
396b9238976Sth 				fattr4_fsid ga_fsid = garp->n4g_fsid;
397b9238976Sth 				servinfo4_t *svp = rp->r_server;
3987c478bd9Sstevel@tonic-gate 
399b9238976Sth 				rp->r_srv_fsid = ga_fsid;
4007c478bd9Sstevel@tonic-gate 
				/* sv_fsid is protected by sv_lock; compare under RW_READER. */
401b9238976Sth 				(void) nfs_rw_enter_sig(&svp->sv_lock,
402b9238976Sth 				    RW_READER, 0);
403b9238976Sth 				if (!FATTR4_FSID_EQ(&ga_fsid, &svp->sv_fsid))
404b9238976Sth 					is_stub = 1;
405b9238976Sth 				nfs_rw_exit(&svp->sv_lock);
4067c478bd9Sstevel@tonic-gate 			}
4077c478bd9Sstevel@tonic-gate 
408b9238976Sth 			if (is_stub)
409b9238976Sth 				r4_stub_mirrormount(rp);
410b9238976Sth 			else
411b9238976Sth 				r4_stub_none(rp);
412b9238976Sth 
4137c478bd9Sstevel@tonic-gate 			/* Can not cache partial attr */
4147c478bd9Sstevel@tonic-gate 			if (attr->va_mask == AT_ALL)
4157c478bd9Sstevel@tonic-gate 				nfs4_attrcache_noinval(vp, garp, t);
4167c478bd9Sstevel@tonic-gate 			else
4177c478bd9Sstevel@tonic-gate 				PURGE_ATTRCACHE4(vp);
4187c478bd9Sstevel@tonic-gate 
4197c478bd9Sstevel@tonic-gate 			rw_exit(&rtable4[index].r_lock);
4207c478bd9Sstevel@tonic-gate 		}
4217c478bd9Sstevel@tonic-gate 	} else {
		/* No attributes supplied: a new node's cache starts out purged. */
4227c478bd9Sstevel@tonic-gate 		if (newnode) {
4237c478bd9Sstevel@tonic-gate 			PURGE_ATTRCACHE4(vp);
4247c478bd9Sstevel@tonic-gate 		}
4257c478bd9Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
4267c478bd9Sstevel@tonic-gate 	}
4277c478bd9Sstevel@tonic-gate }
4287c478bd9Sstevel@tonic-gate 
4297c478bd9Sstevel@tonic-gate /*
4307c478bd9Sstevel@tonic-gate  * Find or create an rnode based primarily on filehandle.  To be
4317c478bd9Sstevel@tonic-gate  * used when dvp (vnode for parent directory) is not available;
4327c478bd9Sstevel@tonic-gate  * otherwise, makenfs4node() should be used.
4337c478bd9Sstevel@tonic-gate  *
4347c478bd9Sstevel@tonic-gate  * The nfs4_fname_t argument *npp is consumed and nulled out.
4357c478bd9Sstevel@tonic-gate  */
/*
 * The hash bucket lock is taken RW_READER here and released inside
 * r4_do_attrcache() on every path, so it is not held on return.
 * Returns the (held) vnode for the rnode.
 */
4367c478bd9Sstevel@tonic-gate 
4377c478bd9Sstevel@tonic-gate vnode_t *
4387c478bd9Sstevel@tonic-gate makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
439b9238976Sth     nfs4_fname_t **npp, nfs4_ga_res_t *garp,
440b9238976Sth     mntinfo4_t *mi, cred_t *cr, hrtime_t t)
4417c478bd9Sstevel@tonic-gate {
4427c478bd9Sstevel@tonic-gate 	vfs_t *vfsp = mi->mi_vfsp;
4437c478bd9Sstevel@tonic-gate 	int newnode = 0;
4447c478bd9Sstevel@tonic-gate 	vnode_t *vp;
4457c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
4467c478bd9Sstevel@tonic-gate 	svnode_t *svp;
447*bbf2a467SNagakiran Rajashekar 	nfs4_fname_t *name, *svpname;
4487c478bd9Sstevel@tonic-gate 	int index;
4497c478bd9Sstevel@tonic-gate 
	/* Take ownership of the fname; the caller's pointer is nulled. */
4507c478bd9Sstevel@tonic-gate 	ASSERT(npp && *npp);
4517c478bd9Sstevel@tonic-gate 	name = *npp;
4527c478bd9Sstevel@tonic-gate 	*npp = NULL;
4537c478bd9Sstevel@tonic-gate 
4547c478bd9Sstevel@tonic-gate 	index = rtable4hash(sfh);
4557c478bd9Sstevel@tonic-gate 	rw_enter(&rtable4[index].r_lock, RW_READER);
4567c478bd9Sstevel@tonic-gate 
	/* make_rnode4() may upgrade the bucket lock to RW_WRITER. */
4577c478bd9Sstevel@tonic-gate 	vp = make_rnode4(sfh, &rtable4[index], vfsp,
4587c478bd9Sstevel@tonic-gate 	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);
459*bbf2a467SNagakiran Rajashekar 
460*bbf2a467SNagakiran Rajashekar 	svp = VTOSV(vp);
461*bbf2a467SNagakiran Rajashekar 	rp = VTOR4(vp);
4627c478bd9Sstevel@tonic-gate 	if (newnode) {
		/* Fresh shadow vnode: install name and (held) parent fh. */
4637c478bd9Sstevel@tonic-gate 		svp->sv_forw = svp->sv_back = svp;
4647c478bd9Sstevel@tonic-gate 		svp->sv_name = name;
4657c478bd9Sstevel@tonic-gate 		if (psfh != NULL)
4667c478bd9Sstevel@tonic-gate 			sfh4_hold(psfh);
4677c478bd9Sstevel@tonic-gate 		svp->sv_dfh = psfh;
468*bbf2a467SNagakiran Rajashekar 	} else if (vp->v_type == VDIR) {
469*bbf2a467SNagakiran Rajashekar 		/*
470*bbf2a467SNagakiran Rajashekar 		 * It is possible that due to a server
471*bbf2a467SNagakiran Rajashekar 		 * side rename fnames have changed.
472*bbf2a467SNagakiran Rajashekar 		 * update the fname here.
473*bbf2a467SNagakiran Rajashekar 		 */
474*bbf2a467SNagakiran Rajashekar 		mutex_enter(&rp->r_svlock);
475*bbf2a467SNagakiran Rajashekar 		svpname = svp->sv_name;
476*bbf2a467SNagakiran Rajashekar 		if (svp->sv_name != name) {
477*bbf2a467SNagakiran Rajashekar 			svp->sv_name = name;
478*bbf2a467SNagakiran Rajashekar 			mutex_exit(&rp->r_svlock);
479*bbf2a467SNagakiran Rajashekar 			fn_rele(&svpname);
480*bbf2a467SNagakiran Rajashekar 		} else {
481*bbf2a467SNagakiran Rajashekar 			mutex_exit(&rp->r_svlock);
482*bbf2a467SNagakiran Rajashekar 			fn_rele(&name);
483*bbf2a467SNagakiran Rajashekar 		}
4847c478bd9Sstevel@tonic-gate 	} else {
		/* Existing non-directory node: the consumed name is dropped. */
4857c478bd9Sstevel@tonic-gate 		fn_rele(&name);
4867c478bd9Sstevel@tonic-gate 	}
4877c478bd9Sstevel@tonic-gate 
	/* r4_do_attrcache() releases the bucket lock on all paths. */
4887c478bd9Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
4897c478bd9Sstevel@tonic-gate 	r4_do_attrcache(vp, garp, newnode, t, cr, index);
4907c478bd9Sstevel@tonic-gate 	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);
4917c478bd9Sstevel@tonic-gate 
4927c478bd9Sstevel@tonic-gate 	return (vp);
4937c478bd9Sstevel@tonic-gate }
4947c478bd9Sstevel@tonic-gate 
4957c478bd9Sstevel@tonic-gate /*
4967c478bd9Sstevel@tonic-gate  * Find or create a vnode for the given filehandle, filesystem, parent, and
4977c478bd9Sstevel@tonic-gate  * name.  The reference to nm is consumed, so the caller must first do an
4987c478bd9Sstevel@tonic-gate  * fn_hold() if it wants to continue using nm after this call.
4997c478bd9Sstevel@tonic-gate  */
/*
 * A suspicious root fh for "." is silently replaced with mi_rootfh
 * (see badrootfh_check()); in that case the attributes are NOT cached.
 * The hash bucket lock is released before return on every path.
 */
5007c478bd9Sstevel@tonic-gate vnode_t *
5017c478bd9Sstevel@tonic-gate makenfs4node(nfs4_sharedfh_t *fh, nfs4_ga_res_t *garp, struct vfs *vfsp,
502b9238976Sth     hrtime_t t, cred_t *cr, vnode_t *dvp, nfs4_fname_t *nm)
5037c478bd9Sstevel@tonic-gate {
5047c478bd9Sstevel@tonic-gate 	vnode_t *vp;
5057c478bd9Sstevel@tonic-gate 	int newnode;
5067c478bd9Sstevel@tonic-gate 	int index;
5077c478bd9Sstevel@tonic-gate 	mntinfo4_t *mi = VFTOMI4(vfsp);
5087c478bd9Sstevel@tonic-gate 	int had_badfh = 0;
5097c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
5107c478bd9Sstevel@tonic-gate 
5117c478bd9Sstevel@tonic-gate 	ASSERT(dvp != NULL);
5127c478bd9Sstevel@tonic-gate 
	/* May substitute mi_rootfh for a bogus "." filehandle. */
5137c478bd9Sstevel@tonic-gate 	fh = badrootfh_check(fh, nm, mi, &had_badfh);
5147c478bd9Sstevel@tonic-gate 
5157c478bd9Sstevel@tonic-gate 	index = rtable4hash(fh);
5167c478bd9Sstevel@tonic-gate 	rw_enter(&rtable4[index].r_lock, RW_READER);
5177c478bd9Sstevel@tonic-gate 
5187c478bd9Sstevel@tonic-gate 	/*
5197c478bd9Sstevel@tonic-gate 	 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
5207c478bd9Sstevel@tonic-gate 	 */
5217c478bd9Sstevel@tonic-gate 	vp = make_rnode4(fh, &rtable4[index], vfsp, nfs4_vnodeops,
5227c478bd9Sstevel@tonic-gate 	    nfs4_putapage, &newnode, cr);
5237c478bd9Sstevel@tonic-gate 
	/* sv_activate() consumes the nm reference. */
5247c478bd9Sstevel@tonic-gate 	rp = VTOR4(vp);
5257c478bd9Sstevel@tonic-gate 	sv_activate(&vp, dvp, &nm, newnode);
5267c478bd9Sstevel@tonic-gate 	if (dvp->v_flag & V_XATTRDIR) {
		/* Children of an xattr directory are themselves xattrs. */
5277c478bd9Sstevel@tonic-gate 		mutex_enter(&rp->r_statelock);
5287c478bd9Sstevel@tonic-gate 		rp->r_flags |= R4ISXATTR;
5297c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_statelock);
5307c478bd9Sstevel@tonic-gate 	}
5317c478bd9Sstevel@tonic-gate 
5327c478bd9Sstevel@tonic-gate 	/* if getting a bad file handle, do not cache the attributes. */
5337c478bd9Sstevel@tonic-gate 	if (had_badfh) {
5347c478bd9Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
5357c478bd9Sstevel@tonic-gate 		return (vp);
5367c478bd9Sstevel@tonic-gate 	}
5377c478bd9Sstevel@tonic-gate 
	/* r4_do_attrcache() releases the bucket lock on all paths. */
5387c478bd9Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
5397c478bd9Sstevel@tonic-gate 	r4_do_attrcache(vp, garp, newnode, t, cr, index);
5407c478bd9Sstevel@tonic-gate 	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);
5417c478bd9Sstevel@tonic-gate 
5427c478bd9Sstevel@tonic-gate 	return (vp);
5437c478bd9Sstevel@tonic-gate }
5447c478bd9Sstevel@tonic-gate 
5457c478bd9Sstevel@tonic-gate /*
5467c478bd9Sstevel@tonic-gate  * Hash on address of filehandle object.
5477c478bd9Sstevel@tonic-gate  * XXX totally untuned.
5487c478bd9Sstevel@tonic-gate  */
/*
 * Hashes the IDENTITY (address) of the shared filehandle, not its
 * contents: the address is scaled down by the object size and masked
 * into the table.  Distinct sfh objects hash to different buckets even
 * if their filehandle bytes are equal, so lookups must use the same
 * shared-fh object.  rtable4mask is used as a power-of-two bitmask.
 */
5497c478bd9Sstevel@tonic-gate 
5507c478bd9Sstevel@tonic-gate int
5517c478bd9Sstevel@tonic-gate rtable4hash(nfs4_sharedfh_t *fh)
5527c478bd9Sstevel@tonic-gate {
5537c478bd9Sstevel@tonic-gate 	return (((uintptr_t)fh / sizeof (*fh)) & rtable4mask);
5547c478bd9Sstevel@tonic-gate }
5557c478bd9Sstevel@tonic-gate 
5567c478bd9Sstevel@tonic-gate /*
5577c478bd9Sstevel@tonic-gate  * Find or create the vnode for the given filehandle and filesystem.
5587c478bd9Sstevel@tonic-gate  * *newnode is set to zero if the vnode already existed; non-zero if it had
5597c478bd9Sstevel@tonic-gate  * to be created.
5607c478bd9Sstevel@tonic-gate  *
5617c478bd9Sstevel@tonic-gate  * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
5627c478bd9Sstevel@tonic-gate  */
5637c478bd9Sstevel@tonic-gate 
5647c478bd9Sstevel@tonic-gate static vnode_t *
5657c478bd9Sstevel@tonic-gate make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
5667c478bd9Sstevel@tonic-gate     struct vnodeops *vops,
5677c478bd9Sstevel@tonic-gate     int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
5687c478bd9Sstevel@tonic-gate     int *newnode, cred_t *cr)
5697c478bd9Sstevel@tonic-gate {
5707c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
5717c478bd9Sstevel@tonic-gate 	rnode4_t *trp;
5727c478bd9Sstevel@tonic-gate 	vnode_t *vp;
5737c478bd9Sstevel@tonic-gate 	mntinfo4_t *mi;
5747c478bd9Sstevel@tonic-gate 
5757c478bd9Sstevel@tonic-gate 	ASSERT(RW_READ_HELD(&rhtp->r_lock));
5767c478bd9Sstevel@tonic-gate 
5777c478bd9Sstevel@tonic-gate 	mi = VFTOMI4(vfsp);
5787c478bd9Sstevel@tonic-gate 
5797c478bd9Sstevel@tonic-gate start:
5807c478bd9Sstevel@tonic-gate 	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
5817c478bd9Sstevel@tonic-gate 		vp = RTOV4(rp);
5827c478bd9Sstevel@tonic-gate 		*newnode = 0;
5837c478bd9Sstevel@tonic-gate 		return (vp);
5847c478bd9Sstevel@tonic-gate 	}
5857c478bd9Sstevel@tonic-gate 	rw_exit(&rhtp->r_lock);
5867c478bd9Sstevel@tonic-gate 
5877c478bd9Sstevel@tonic-gate 	mutex_enter(&rp4freelist_lock);
5887c478bd9Sstevel@tonic-gate 
5897c478bd9Sstevel@tonic-gate 	if (rp4freelist != NULL && rnode4_new >= nrnode) {
5907c478bd9Sstevel@tonic-gate 		rp = rp4freelist;
5917c478bd9Sstevel@tonic-gate 		rp4_rmfree(rp);
5927c478bd9Sstevel@tonic-gate 		mutex_exit(&rp4freelist_lock);
5937c478bd9Sstevel@tonic-gate 
5947c478bd9Sstevel@tonic-gate 		vp = RTOV4(rp);
5957c478bd9Sstevel@tonic-gate 
5967c478bd9Sstevel@tonic-gate 		if (rp->r_flags & R4HASHED) {
5977c478bd9Sstevel@tonic-gate 			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
5987c478bd9Sstevel@tonic-gate 			mutex_enter(&vp->v_lock);
5997c478bd9Sstevel@tonic-gate 			if (vp->v_count > 1) {
6007c478bd9Sstevel@tonic-gate 				vp->v_count--;
6017c478bd9Sstevel@tonic-gate 				mutex_exit(&vp->v_lock);
6027c478bd9Sstevel@tonic-gate 				rw_exit(&rp->r_hashq->r_lock);
6037c478bd9Sstevel@tonic-gate 				rw_enter(&rhtp->r_lock, RW_READER);
6047c478bd9Sstevel@tonic-gate 				goto start;
6057c478bd9Sstevel@tonic-gate 			}
6067c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
6077c478bd9Sstevel@tonic-gate 			rp4_rmhash_locked(rp);
6087c478bd9Sstevel@tonic-gate 			rw_exit(&rp->r_hashq->r_lock);
6097c478bd9Sstevel@tonic-gate 		}
6107c478bd9Sstevel@tonic-gate 
6117c478bd9Sstevel@tonic-gate 		r4inactive(rp, cr);
6127c478bd9Sstevel@tonic-gate 
6137c478bd9Sstevel@tonic-gate 		mutex_enter(&vp->v_lock);
6147c478bd9Sstevel@tonic-gate 		if (vp->v_count > 1) {
6157c478bd9Sstevel@tonic-gate 			vp->v_count--;
6167c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
6177c478bd9Sstevel@tonic-gate 			rw_enter(&rhtp->r_lock, RW_READER);
6187c478bd9Sstevel@tonic-gate 			goto start;
6197c478bd9Sstevel@tonic-gate 		}
6207c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
6217c478bd9Sstevel@tonic-gate 		vn_invalid(vp);
6227c478bd9Sstevel@tonic-gate 
6237c478bd9Sstevel@tonic-gate 		/*
6247c478bd9Sstevel@tonic-gate 		 * destroy old locks before bzero'ing and
6257c478bd9Sstevel@tonic-gate 		 * recreating the locks below.
6267c478bd9Sstevel@tonic-gate 		 */
6277c478bd9Sstevel@tonic-gate 		uninit_rnode4(rp);
6287c478bd9Sstevel@tonic-gate 
6297c478bd9Sstevel@tonic-gate 		/*
6307c478bd9Sstevel@tonic-gate 		 * Make sure that if rnode is recycled then
6317c478bd9Sstevel@tonic-gate 		 * VFS count is decremented properly before
6327c478bd9Sstevel@tonic-gate 		 * reuse.
6337c478bd9Sstevel@tonic-gate 		 */
6347c478bd9Sstevel@tonic-gate 		VFS_RELE(vp->v_vfsp);
6357c478bd9Sstevel@tonic-gate 		vn_reinit(vp);
6367c478bd9Sstevel@tonic-gate 	} else {
6377c478bd9Sstevel@tonic-gate 		vnode_t *new_vp;
6387c478bd9Sstevel@tonic-gate 
6397c478bd9Sstevel@tonic-gate 		mutex_exit(&rp4freelist_lock);
6407c478bd9Sstevel@tonic-gate 
6417c478bd9Sstevel@tonic-gate 		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
6427c478bd9Sstevel@tonic-gate 		new_vp = vn_alloc(KM_SLEEP);
6437c478bd9Sstevel@tonic-gate 
6447c478bd9Sstevel@tonic-gate 		atomic_add_long((ulong_t *)&rnode4_new, 1);
6457c478bd9Sstevel@tonic-gate #ifdef DEBUG
6467c478bd9Sstevel@tonic-gate 		clstat4_debug.nrnode.value.ui64++;
6477c478bd9Sstevel@tonic-gate #endif
6487c478bd9Sstevel@tonic-gate 		vp = new_vp;
6497c478bd9Sstevel@tonic-gate 	}
6507c478bd9Sstevel@tonic-gate 
6517c478bd9Sstevel@tonic-gate 	bzero(rp, sizeof (*rp));
6527c478bd9Sstevel@tonic-gate 	rp->r_vnode = vp;
6537c478bd9Sstevel@tonic-gate 	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
6547c478bd9Sstevel@tonic-gate 	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
6557c478bd9Sstevel@tonic-gate 	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
6567c478bd9Sstevel@tonic-gate 	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
6577c478bd9Sstevel@tonic-gate 	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
6587c478bd9Sstevel@tonic-gate 	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
6597c478bd9Sstevel@tonic-gate 	rp->created_v4 = 0;
6607c478bd9Sstevel@tonic-gate 	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
6617c478bd9Sstevel@tonic-gate 	    offsetof(nfs4_open_stream_t, os_node));
6627c478bd9Sstevel@tonic-gate 	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
6637c478bd9Sstevel@tonic-gate 	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
6647c478bd9Sstevel@tonic-gate 	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
6657c478bd9Sstevel@tonic-gate 	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
6667c478bd9Sstevel@tonic-gate 	rp->r_flags = R4READDIRWATTR;
6677c478bd9Sstevel@tonic-gate 	rp->r_fh = fh;
6687c478bd9Sstevel@tonic-gate 	rp->r_hashq = rhtp;
6697c478bd9Sstevel@tonic-gate 	sfh4_hold(rp->r_fh);
6707c478bd9Sstevel@tonic-gate 	rp->r_server = mi->mi_curr_serv;
6717c478bd9Sstevel@tonic-gate 	rp->r_deleg_type = OPEN_DELEGATE_NONE;
6727c478bd9Sstevel@tonic-gate 	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
6737c478bd9Sstevel@tonic-gate 	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);
6747c478bd9Sstevel@tonic-gate 
6757c478bd9Sstevel@tonic-gate 	rddir4_cache_create(rp);
6767c478bd9Sstevel@tonic-gate 	rp->r_putapage = putapage;
6777c478bd9Sstevel@tonic-gate 	vn_setops(vp, vops);
6787c478bd9Sstevel@tonic-gate 	vp->v_data = (caddr_t)rp;
6797c478bd9Sstevel@tonic-gate 	vp->v_vfsp = vfsp;
6807c478bd9Sstevel@tonic-gate 	VFS_HOLD(vfsp);
6817c478bd9Sstevel@tonic-gate 	vp->v_type = VNON;
6827c478bd9Sstevel@tonic-gate 	if (isrootfh(fh, rp))
6837c478bd9Sstevel@tonic-gate 		vp->v_flag = VROOT;
6847c478bd9Sstevel@tonic-gate 	vn_exists(vp);
6857c478bd9Sstevel@tonic-gate 
6867c478bd9Sstevel@tonic-gate 	/*
6877c478bd9Sstevel@tonic-gate 	 * There is a race condition if someone else
6887c478bd9Sstevel@tonic-gate 	 * alloc's the rnode while no locks are held, so we
6897c478bd9Sstevel@tonic-gate 	 * check again and recover if found.
6907c478bd9Sstevel@tonic-gate 	 */
6917c478bd9Sstevel@tonic-gate 	rw_enter(&rhtp->r_lock, RW_WRITER);
6927c478bd9Sstevel@tonic-gate 	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
6937c478bd9Sstevel@tonic-gate 		vp = RTOV4(trp);
6947c478bd9Sstevel@tonic-gate 		*newnode = 0;
6957c478bd9Sstevel@tonic-gate 		rw_exit(&rhtp->r_lock);
6967c478bd9Sstevel@tonic-gate 		rp4_addfree(rp, cr);
6977c478bd9Sstevel@tonic-gate 		rw_enter(&rhtp->r_lock, RW_READER);
6987c478bd9Sstevel@tonic-gate 		return (vp);
6997c478bd9Sstevel@tonic-gate 	}
7007c478bd9Sstevel@tonic-gate 	rp4_addhash(rp);
7017c478bd9Sstevel@tonic-gate 	*newnode = 1;
7027c478bd9Sstevel@tonic-gate 	return (vp);
7037c478bd9Sstevel@tonic-gate }
7047c478bd9Sstevel@tonic-gate 
7057c478bd9Sstevel@tonic-gate static void
7067c478bd9Sstevel@tonic-gate uninit_rnode4(rnode4_t *rp)
7077c478bd9Sstevel@tonic-gate {
7087c478bd9Sstevel@tonic-gate 	vnode_t *vp = RTOV4(rp);
7097c478bd9Sstevel@tonic-gate 
7107c478bd9Sstevel@tonic-gate 	ASSERT(rp != NULL);
7117c478bd9Sstevel@tonic-gate 	ASSERT(vp != NULL);
7127c478bd9Sstevel@tonic-gate 	ASSERT(vp->v_count == 1);
7137c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_count == 0);
7147c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_mapcnt == 0);
7157c478bd9Sstevel@tonic-gate 	if (rp->r_flags & R4LODANGLERS) {
7167c478bd9Sstevel@tonic-gate 		nfs4_flush_lock_owners(rp);
7177c478bd9Sstevel@tonic-gate 	}
7187c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_lo_head.lo_next_rnode == &rp->r_lo_head);
7197c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_lo_head.lo_prev_rnode == &rp->r_lo_head);
7207c478bd9Sstevel@tonic-gate 	ASSERT(!(rp->r_flags & R4HASHED));
7217c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
7227c478bd9Sstevel@tonic-gate 	nfs4_clear_open_streams(rp);
7237c478bd9Sstevel@tonic-gate 	list_destroy(&rp->r_open_streams);
7247c478bd9Sstevel@tonic-gate 
7257c478bd9Sstevel@tonic-gate 	/*
7267c478bd9Sstevel@tonic-gate 	 * Destroy the rddir cache first since we need to grab the r_statelock.
7277c478bd9Sstevel@tonic-gate 	 */
7287c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
7297c478bd9Sstevel@tonic-gate 	rddir4_cache_destroy(rp);
7307c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
7317c478bd9Sstevel@tonic-gate 	sv_uninit(&rp->r_svnode);
7327c478bd9Sstevel@tonic-gate 	sfh4_rele(&rp->r_fh);
7337c478bd9Sstevel@tonic-gate 	nfs_rw_destroy(&rp->r_rwlock);
7347c478bd9Sstevel@tonic-gate 	nfs_rw_destroy(&rp->r_lkserlock);
7357c478bd9Sstevel@tonic-gate 	mutex_destroy(&rp->r_statelock);
7367c478bd9Sstevel@tonic-gate 	mutex_destroy(&rp->r_statev4_lock);
7377c478bd9Sstevel@tonic-gate 	mutex_destroy(&rp->r_os_lock);
7387c478bd9Sstevel@tonic-gate 	cv_destroy(&rp->r_cv);
7397c478bd9Sstevel@tonic-gate 	cv_destroy(&rp->r_commit.c_cv);
7407c478bd9Sstevel@tonic-gate 	nfs_rw_destroy(&rp->r_deleg_recall_lock);
7417c478bd9Sstevel@tonic-gate 	if (rp->r_flags & R4DELMAPLIST)
7427c478bd9Sstevel@tonic-gate 		list_destroy(&rp->r_indelmap);
7437c478bd9Sstevel@tonic-gate }
7447c478bd9Sstevel@tonic-gate 
7457c478bd9Sstevel@tonic-gate /*
7467c478bd9Sstevel@tonic-gate  * Put an rnode on the free list.
7477c478bd9Sstevel@tonic-gate  *
7487c478bd9Sstevel@tonic-gate  * Rnodes which were allocated above and beyond the normal limit
7497c478bd9Sstevel@tonic-gate  * are immediately freed.
7507c478bd9Sstevel@tonic-gate  */
7517c478bd9Sstevel@tonic-gate void
7527c478bd9Sstevel@tonic-gate rp4_addfree(rnode4_t *rp, cred_t *cr)
7537c478bd9Sstevel@tonic-gate {
7547c478bd9Sstevel@tonic-gate 	vnode_t *vp;
7557c478bd9Sstevel@tonic-gate 	vnode_t *xattr;
7567c478bd9Sstevel@tonic-gate 	struct vfs *vfsp;
7577c478bd9Sstevel@tonic-gate 
7587c478bd9Sstevel@tonic-gate 	vp = RTOV4(rp);
7597c478bd9Sstevel@tonic-gate 	ASSERT(vp->v_count >= 1);
7607c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
7617c478bd9Sstevel@tonic-gate 
7627c478bd9Sstevel@tonic-gate 	/*
7637c478bd9Sstevel@tonic-gate 	 * If we have too many rnodes allocated and there are no
7647c478bd9Sstevel@tonic-gate 	 * references to this rnode, or if the rnode is no longer
7657c478bd9Sstevel@tonic-gate 	 * accessible by it does not reside in the hash queues,
7667c478bd9Sstevel@tonic-gate 	 * or if an i/o error occurred while writing to the file,
7677c478bd9Sstevel@tonic-gate 	 * then just free it instead of putting it on the rnode
7687c478bd9Sstevel@tonic-gate 	 * freelist.
7697c478bd9Sstevel@tonic-gate 	 */
7707c478bd9Sstevel@tonic-gate 	vfsp = vp->v_vfsp;
7717c478bd9Sstevel@tonic-gate 	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
7727c478bd9Sstevel@tonic-gate #ifdef DEBUG
7737c478bd9Sstevel@tonic-gate 	    (nfs4_rnode_nofreelist != 0) ||
7747c478bd9Sstevel@tonic-gate #endif
7757c478bd9Sstevel@tonic-gate 	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
7767c478bd9Sstevel@tonic-gate 	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
7777c478bd9Sstevel@tonic-gate 		if (rp->r_flags & R4HASHED) {
7787c478bd9Sstevel@tonic-gate 			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
7797c478bd9Sstevel@tonic-gate 			mutex_enter(&vp->v_lock);
7807c478bd9Sstevel@tonic-gate 			if (vp->v_count > 1) {
7817c478bd9Sstevel@tonic-gate 				vp->v_count--;
7827c478bd9Sstevel@tonic-gate 				mutex_exit(&vp->v_lock);
7837c478bd9Sstevel@tonic-gate 				rw_exit(&rp->r_hashq->r_lock);
7847c478bd9Sstevel@tonic-gate 				return;
7857c478bd9Sstevel@tonic-gate 			}
7867c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
7877c478bd9Sstevel@tonic-gate 			rp4_rmhash_locked(rp);
7887c478bd9Sstevel@tonic-gate 			rw_exit(&rp->r_hashq->r_lock);
7897c478bd9Sstevel@tonic-gate 		}
7907c478bd9Sstevel@tonic-gate 
7917c478bd9Sstevel@tonic-gate 		/*
7927c478bd9Sstevel@tonic-gate 		 * Make sure we don't have a delegation on this rnode
7937c478bd9Sstevel@tonic-gate 		 * before destroying it.
7947c478bd9Sstevel@tonic-gate 		 */
7957c478bd9Sstevel@tonic-gate 		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
7967c478bd9Sstevel@tonic-gate 			(void) nfs4delegreturn(rp,
797b9238976Sth 			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
7987c478bd9Sstevel@tonic-gate 		}
7997c478bd9Sstevel@tonic-gate 
8007c478bd9Sstevel@tonic-gate 		r4inactive(rp, cr);
8017c478bd9Sstevel@tonic-gate 
8027c478bd9Sstevel@tonic-gate 		/*
8037c478bd9Sstevel@tonic-gate 		 * Recheck the vnode reference count.  We need to
8047c478bd9Sstevel@tonic-gate 		 * make sure that another reference has not been
8057c478bd9Sstevel@tonic-gate 		 * acquired while we were not holding v_lock.  The
8067c478bd9Sstevel@tonic-gate 		 * rnode is not in the rnode hash queues; one
8077c478bd9Sstevel@tonic-gate 		 * way for a reference to have been acquired
8087c478bd9Sstevel@tonic-gate 		 * is for a VOP_PUTPAGE because the rnode was marked
8097c478bd9Sstevel@tonic-gate 		 * with R4DIRTY or for a modified page.  This
8107c478bd9Sstevel@tonic-gate 		 * reference may have been acquired before our call
8117c478bd9Sstevel@tonic-gate 		 * to r4inactive.  The i/o may have been completed,
8127c478bd9Sstevel@tonic-gate 		 * thus allowing r4inactive to complete, but the
8137c478bd9Sstevel@tonic-gate 		 * reference to the vnode may not have been released
8147c478bd9Sstevel@tonic-gate 		 * yet.  In any case, the rnode can not be destroyed
8157c478bd9Sstevel@tonic-gate 		 * until the other references to this vnode have been
8167c478bd9Sstevel@tonic-gate 		 * released.  The other references will take care of
8177c478bd9Sstevel@tonic-gate 		 * either destroying the rnode or placing it on the
8187c478bd9Sstevel@tonic-gate 		 * rnode freelist.  If there are no other references,
8197c478bd9Sstevel@tonic-gate 		 * then the rnode may be safely destroyed.
8207c478bd9Sstevel@tonic-gate 		 */
8217c478bd9Sstevel@tonic-gate 		mutex_enter(&vp->v_lock);
8227c478bd9Sstevel@tonic-gate 		if (vp->v_count > 1) {
8237c478bd9Sstevel@tonic-gate 			vp->v_count--;
8247c478bd9Sstevel@tonic-gate 			mutex_exit(&vp->v_lock);
8257c478bd9Sstevel@tonic-gate 			return;
8267c478bd9Sstevel@tonic-gate 		}
8277c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
8287c478bd9Sstevel@tonic-gate 
8297c478bd9Sstevel@tonic-gate 		destroy_rnode4(rp);
8307c478bd9Sstevel@tonic-gate 		return;
8317c478bd9Sstevel@tonic-gate 	}
8327c478bd9Sstevel@tonic-gate 
8337c478bd9Sstevel@tonic-gate 	/*
8347c478bd9Sstevel@tonic-gate 	 * Lock the hash queue and then recheck the reference count
8357c478bd9Sstevel@tonic-gate 	 * to ensure that no other threads have acquired a reference
8367c478bd9Sstevel@tonic-gate 	 * to indicate that the rnode should not be placed on the
8377c478bd9Sstevel@tonic-gate 	 * freelist.  If another reference has been acquired, then
8387c478bd9Sstevel@tonic-gate 	 * just release this one and let the other thread complete
8397c478bd9Sstevel@tonic-gate 	 * the processing of adding this rnode to the freelist.
8407c478bd9Sstevel@tonic-gate 	 */
8417c478bd9Sstevel@tonic-gate again:
8427c478bd9Sstevel@tonic-gate 	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
8437c478bd9Sstevel@tonic-gate 
8447c478bd9Sstevel@tonic-gate 	mutex_enter(&vp->v_lock);
8457c478bd9Sstevel@tonic-gate 	if (vp->v_count > 1) {
8467c478bd9Sstevel@tonic-gate 		vp->v_count--;
8477c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
8487c478bd9Sstevel@tonic-gate 		rw_exit(&rp->r_hashq->r_lock);
8497c478bd9Sstevel@tonic-gate 		return;
8507c478bd9Sstevel@tonic-gate 	}
8517c478bd9Sstevel@tonic-gate 	mutex_exit(&vp->v_lock);
8527c478bd9Sstevel@tonic-gate 
8537c478bd9Sstevel@tonic-gate 	/*
8547c478bd9Sstevel@tonic-gate 	 * Make sure we don't put an rnode with a delegation
8557c478bd9Sstevel@tonic-gate 	 * on the free list.
8567c478bd9Sstevel@tonic-gate 	 */
8577c478bd9Sstevel@tonic-gate 	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
8587c478bd9Sstevel@tonic-gate 		rw_exit(&rp->r_hashq->r_lock);
8597c478bd9Sstevel@tonic-gate 		(void) nfs4delegreturn(rp,
860b9238976Sth 		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
8617c478bd9Sstevel@tonic-gate 		goto again;
8627c478bd9Sstevel@tonic-gate 	}
8637c478bd9Sstevel@tonic-gate 
8647c478bd9Sstevel@tonic-gate 	/*
8657c478bd9Sstevel@tonic-gate 	 * Now that we have the hash queue lock, and we know there
8667c478bd9Sstevel@tonic-gate 	 * are not anymore references on the vnode, check to make
8677c478bd9Sstevel@tonic-gate 	 * sure there aren't any open streams still on the rnode.
8687c478bd9Sstevel@tonic-gate 	 * If so, drop the hash queue lock, remove the open streams,
8697c478bd9Sstevel@tonic-gate 	 * and recheck the v_count.
8707c478bd9Sstevel@tonic-gate 	 */
8717c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_os_lock);
8727c478bd9Sstevel@tonic-gate 	if (list_head(&rp->r_open_streams) != NULL) {
8737c478bd9Sstevel@tonic-gate 		mutex_exit(&rp->r_os_lock);
8747c478bd9Sstevel@tonic-gate 		rw_exit(&rp->r_hashq->r_lock);
875108322fbScarlsonj 		if (nfs_zone() != VTOMI4(vp)->mi_zone)
8767c478bd9Sstevel@tonic-gate 			nfs4_clear_open_streams(rp);
8777c478bd9Sstevel@tonic-gate 		else
8787c478bd9Sstevel@tonic-gate 			(void) nfs4close_all(vp, cr);
8797c478bd9Sstevel@tonic-gate 		goto again;
8807c478bd9Sstevel@tonic-gate 	}
8817c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_os_lock);
8827c478bd9Sstevel@tonic-gate 
8839f9e2373Sjwahlig 	/*
8849f9e2373Sjwahlig 	 * Before we put it on the freelist, make sure there are no pages.
8859f9e2373Sjwahlig 	 * If there are, flush and commit of all of the dirty and
8869f9e2373Sjwahlig 	 * uncommitted pages, assuming the file system isn't read only.
8879f9e2373Sjwahlig 	 */
8889f9e2373Sjwahlig 	if (!(vp->v_vfsp->vfs_flag & VFS_RDONLY) && nfs4_dross_pages(vp)) {
8899f9e2373Sjwahlig 		rw_exit(&rp->r_hashq->r_lock);
8909f9e2373Sjwahlig 		r4flushpages(rp, cr);
8919f9e2373Sjwahlig 		goto again;
8929f9e2373Sjwahlig 	}
8939f9e2373Sjwahlig 
8947c478bd9Sstevel@tonic-gate 	/*
8957c478bd9Sstevel@tonic-gate 	 * Before we put it on the freelist, make sure there is no
8967c478bd9Sstevel@tonic-gate 	 * active xattr directory cached, the freelist will not
8977c478bd9Sstevel@tonic-gate 	 * have its entries r4inactive'd if there is still an active
8987c478bd9Sstevel@tonic-gate 	 * rnode, thus nothing in the freelist can hold another
8997c478bd9Sstevel@tonic-gate 	 * rnode active.
9007c478bd9Sstevel@tonic-gate 	 */
9017c478bd9Sstevel@tonic-gate 	xattr = rp->r_xattr_dir;
9027c478bd9Sstevel@tonic-gate 	rp->r_xattr_dir = NULL;
9037c478bd9Sstevel@tonic-gate 
9047c478bd9Sstevel@tonic-gate 	/*
9057c478bd9Sstevel@tonic-gate 	 * If there is no cached data or metadata for this file, then
9067c478bd9Sstevel@tonic-gate 	 * put the rnode on the front of the freelist so that it will
9077c478bd9Sstevel@tonic-gate 	 * be reused before other rnodes which may have cached data or
9087c478bd9Sstevel@tonic-gate 	 * metadata associated with them.
9097c478bd9Sstevel@tonic-gate 	 */
9107c478bd9Sstevel@tonic-gate 	mutex_enter(&rp4freelist_lock);
9117c478bd9Sstevel@tonic-gate 	if (rp4freelist == NULL) {
9127c478bd9Sstevel@tonic-gate 		rp->r_freef = rp;
9137c478bd9Sstevel@tonic-gate 		rp->r_freeb = rp;
9147c478bd9Sstevel@tonic-gate 		rp4freelist = rp;
9157c478bd9Sstevel@tonic-gate 	} else {
9167c478bd9Sstevel@tonic-gate 		rp->r_freef = rp4freelist;
9177c478bd9Sstevel@tonic-gate 		rp->r_freeb = rp4freelist->r_freeb;
9187c478bd9Sstevel@tonic-gate 		rp4freelist->r_freeb->r_freef = rp;
9197c478bd9Sstevel@tonic-gate 		rp4freelist->r_freeb = rp;
9207c478bd9Sstevel@tonic-gate 		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
9219f9e2373Sjwahlig 		    rp->r_symlink.contents == NULL && rp->r_secattr == NULL)
9227c478bd9Sstevel@tonic-gate 			rp4freelist = rp;
9237c478bd9Sstevel@tonic-gate 	}
9247c478bd9Sstevel@tonic-gate 	mutex_exit(&rp4freelist_lock);
9257c478bd9Sstevel@tonic-gate 
9267c478bd9Sstevel@tonic-gate 	rw_exit(&rp->r_hashq->r_lock);
9277c478bd9Sstevel@tonic-gate 
9287c478bd9Sstevel@tonic-gate 	if (xattr)
9297c478bd9Sstevel@tonic-gate 		VN_RELE(xattr);
9307c478bd9Sstevel@tonic-gate }
9317c478bd9Sstevel@tonic-gate 
9327c478bd9Sstevel@tonic-gate /*
9337c478bd9Sstevel@tonic-gate  * Remove an rnode from the free list.
9347c478bd9Sstevel@tonic-gate  *
9357c478bd9Sstevel@tonic-gate  * The caller must be holding rp4freelist_lock and the rnode
9367c478bd9Sstevel@tonic-gate  * must be on the freelist.
9377c478bd9Sstevel@tonic-gate  */
9387c478bd9Sstevel@tonic-gate static void
9397c478bd9Sstevel@tonic-gate rp4_rmfree(rnode4_t *rp)
9407c478bd9Sstevel@tonic-gate {
9417c478bd9Sstevel@tonic-gate 
9427c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&rp4freelist_lock));
9437c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_freef != NULL && rp->r_freeb != NULL);
9447c478bd9Sstevel@tonic-gate 
9457c478bd9Sstevel@tonic-gate 	if (rp == rp4freelist) {
9467c478bd9Sstevel@tonic-gate 		rp4freelist = rp->r_freef;
9477c478bd9Sstevel@tonic-gate 		if (rp == rp4freelist)
9487c478bd9Sstevel@tonic-gate 			rp4freelist = NULL;
9497c478bd9Sstevel@tonic-gate 	}
9507c478bd9Sstevel@tonic-gate 	rp->r_freeb->r_freef = rp->r_freef;
9517c478bd9Sstevel@tonic-gate 	rp->r_freef->r_freeb = rp->r_freeb;
9527c478bd9Sstevel@tonic-gate 
9537c478bd9Sstevel@tonic-gate 	rp->r_freef = rp->r_freeb = NULL;
9547c478bd9Sstevel@tonic-gate }
9557c478bd9Sstevel@tonic-gate 
9567c478bd9Sstevel@tonic-gate /*
9577c478bd9Sstevel@tonic-gate  * Put a rnode in the hash table.
9587c478bd9Sstevel@tonic-gate  *
9597c478bd9Sstevel@tonic-gate  * The caller must be holding the exclusive hash queue lock
9607c478bd9Sstevel@tonic-gate  */
9617c478bd9Sstevel@tonic-gate void
9627c478bd9Sstevel@tonic-gate rp4_addhash(rnode4_t *rp)
9637c478bd9Sstevel@tonic-gate {
9647c478bd9Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
9657c478bd9Sstevel@tonic-gate 	ASSERT(!(rp->r_flags & R4HASHED));
9667c478bd9Sstevel@tonic-gate 
9677c478bd9Sstevel@tonic-gate #ifdef DEBUG
9687c478bd9Sstevel@tonic-gate 	r4_dup_check(rp, RTOV4(rp)->v_vfsp);
9697c478bd9Sstevel@tonic-gate #endif
9707c478bd9Sstevel@tonic-gate 
9717c478bd9Sstevel@tonic-gate 	rp->r_hashf = rp->r_hashq->r_hashf;
9727c478bd9Sstevel@tonic-gate 	rp->r_hashq->r_hashf = rp;
9737c478bd9Sstevel@tonic-gate 	rp->r_hashb = (rnode4_t *)rp->r_hashq;
9747c478bd9Sstevel@tonic-gate 	rp->r_hashf->r_hashb = rp;
9757c478bd9Sstevel@tonic-gate 
9767c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
9777c478bd9Sstevel@tonic-gate 	rp->r_flags |= R4HASHED;
9787c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
9797c478bd9Sstevel@tonic-gate }
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate /*
9827c478bd9Sstevel@tonic-gate  * Remove a rnode from the hash table.
9837c478bd9Sstevel@tonic-gate  *
9847c478bd9Sstevel@tonic-gate  * The caller must be holding the hash queue lock.
9857c478bd9Sstevel@tonic-gate  */
9867c478bd9Sstevel@tonic-gate void
9877c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rnode4_t *rp)
9887c478bd9Sstevel@tonic-gate {
9897c478bd9Sstevel@tonic-gate 	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
9907c478bd9Sstevel@tonic-gate 	ASSERT(rp->r_flags & R4HASHED);
9917c478bd9Sstevel@tonic-gate 
9927c478bd9Sstevel@tonic-gate 	rp->r_hashb->r_hashf = rp->r_hashf;
9937c478bd9Sstevel@tonic-gate 	rp->r_hashf->r_hashb = rp->r_hashb;
9947c478bd9Sstevel@tonic-gate 
9957c478bd9Sstevel@tonic-gate 	mutex_enter(&rp->r_statelock);
9967c478bd9Sstevel@tonic-gate 	rp->r_flags &= ~R4HASHED;
9977c478bd9Sstevel@tonic-gate 	mutex_exit(&rp->r_statelock);
9987c478bd9Sstevel@tonic-gate }
9997c478bd9Sstevel@tonic-gate 
10007c478bd9Sstevel@tonic-gate /*
10017c478bd9Sstevel@tonic-gate  * Remove a rnode from the hash table.
10027c478bd9Sstevel@tonic-gate  *
10037c478bd9Sstevel@tonic-gate  * The caller must not be holding the hash queue lock.
10047c478bd9Sstevel@tonic-gate  */
10057c478bd9Sstevel@tonic-gate void
10067c478bd9Sstevel@tonic-gate rp4_rmhash(rnode4_t *rp)
10077c478bd9Sstevel@tonic-gate {
10087c478bd9Sstevel@tonic-gate 	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
10097c478bd9Sstevel@tonic-gate 	rp4_rmhash_locked(rp);
10107c478bd9Sstevel@tonic-gate 	rw_exit(&rp->r_hashq->r_lock);
10117c478bd9Sstevel@tonic-gate }
10127c478bd9Sstevel@tonic-gate 
10137c478bd9Sstevel@tonic-gate /*
10147c478bd9Sstevel@tonic-gate  * Lookup a rnode by fhandle.  Ignores rnodes that had failed recovery.
10157c478bd9Sstevel@tonic-gate  * Returns NULL if no match.  If an rnode is returned, the reference count
10167c478bd9Sstevel@tonic-gate  * on the master vnode is incremented.
10177c478bd9Sstevel@tonic-gate  *
10187c478bd9Sstevel@tonic-gate  * The caller must be holding the hash queue lock, either shared or exclusive.
10197c478bd9Sstevel@tonic-gate  */
10207c478bd9Sstevel@tonic-gate rnode4_t *
10217c478bd9Sstevel@tonic-gate r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
10227c478bd9Sstevel@tonic-gate {
10237c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
10247c478bd9Sstevel@tonic-gate 	vnode_t *vp;
10257c478bd9Sstevel@tonic-gate 
10267c478bd9Sstevel@tonic-gate 	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));
10277c478bd9Sstevel@tonic-gate 
10287c478bd9Sstevel@tonic-gate 	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
10297c478bd9Sstevel@tonic-gate 		vp = RTOV4(rp);
10307c478bd9Sstevel@tonic-gate 		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {
10317c478bd9Sstevel@tonic-gate 
10327c478bd9Sstevel@tonic-gate 			mutex_enter(&rp->r_statelock);
10337c478bd9Sstevel@tonic-gate 			if (rp->r_flags & R4RECOVERR) {
10347c478bd9Sstevel@tonic-gate 				mutex_exit(&rp->r_statelock);
10357c478bd9Sstevel@tonic-gate 				continue;
10367c478bd9Sstevel@tonic-gate 			}
10377c478bd9Sstevel@tonic-gate 			mutex_exit(&rp->r_statelock);
10387c478bd9Sstevel@tonic-gate #ifdef DEBUG
10397c478bd9Sstevel@tonic-gate 			r4_dup_check(rp, vfsp);
10407c478bd9Sstevel@tonic-gate #endif
10417c478bd9Sstevel@tonic-gate 			if (rp->r_freef != NULL) {
10427c478bd9Sstevel@tonic-gate 				mutex_enter(&rp4freelist_lock);
10437c478bd9Sstevel@tonic-gate 				/*
10447c478bd9Sstevel@tonic-gate 				 * If the rnode is on the freelist,
10457c478bd9Sstevel@tonic-gate 				 * then remove it and use that reference
10467c478bd9Sstevel@tonic-gate 				 * as the new reference.  Otherwise,
10477c478bd9Sstevel@tonic-gate 				 * need to increment the reference count.
10487c478bd9Sstevel@tonic-gate 				 */
10497c478bd9Sstevel@tonic-gate 				if (rp->r_freef != NULL) {
10507c478bd9Sstevel@tonic-gate 					rp4_rmfree(rp);
10517c478bd9Sstevel@tonic-gate 					mutex_exit(&rp4freelist_lock);
10527c478bd9Sstevel@tonic-gate 				} else {
10537c478bd9Sstevel@tonic-gate 					mutex_exit(&rp4freelist_lock);
10547c478bd9Sstevel@tonic-gate 					VN_HOLD(vp);
10557c478bd9Sstevel@tonic-gate 				}
10567c478bd9Sstevel@tonic-gate 			} else
10577c478bd9Sstevel@tonic-gate 				VN_HOLD(vp);
10587c478bd9Sstevel@tonic-gate 
10597c478bd9Sstevel@tonic-gate 			/*
10607c478bd9Sstevel@tonic-gate 			 * if root vnode, set v_flag to indicate that
10617c478bd9Sstevel@tonic-gate 			 */
10627c478bd9Sstevel@tonic-gate 			if (isrootfh(fh, rp)) {
10637c478bd9Sstevel@tonic-gate 				if (!(vp->v_flag & VROOT)) {
10647c478bd9Sstevel@tonic-gate 					mutex_enter(&vp->v_lock);
10657c478bd9Sstevel@tonic-gate 					vp->v_flag |= VROOT;
10667c478bd9Sstevel@tonic-gate 					mutex_exit(&vp->v_lock);
10677c478bd9Sstevel@tonic-gate 				}
10687c478bd9Sstevel@tonic-gate 			}
10697c478bd9Sstevel@tonic-gate 			return (rp);
10707c478bd9Sstevel@tonic-gate 		}
10717c478bd9Sstevel@tonic-gate 	}
10727c478bd9Sstevel@tonic-gate 	return (NULL);
10737c478bd9Sstevel@tonic-gate }
10747c478bd9Sstevel@tonic-gate 
10757c478bd9Sstevel@tonic-gate /*
10767c478bd9Sstevel@tonic-gate  * Lookup an rnode by fhandle. Just a wrapper for r4find()
10777c478bd9Sstevel@tonic-gate  * that assumes the caller hasn't already got the lock
10787c478bd9Sstevel@tonic-gate  * on the hash bucket.
10797c478bd9Sstevel@tonic-gate  */
10807c478bd9Sstevel@tonic-gate rnode4_t *
10817c478bd9Sstevel@tonic-gate r4find_unlocked(nfs4_sharedfh_t *fh, struct vfs *vfsp)
10827c478bd9Sstevel@tonic-gate {
10837c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
10847c478bd9Sstevel@tonic-gate 	int index;
10857c478bd9Sstevel@tonic-gate 
10867c478bd9Sstevel@tonic-gate 	index = rtable4hash(fh);
10877c478bd9Sstevel@tonic-gate 	rw_enter(&rtable4[index].r_lock, RW_READER);
10887c478bd9Sstevel@tonic-gate 	rp = r4find(&rtable4[index], fh, vfsp);
10897c478bd9Sstevel@tonic-gate 	rw_exit(&rtable4[index].r_lock);
10907c478bd9Sstevel@tonic-gate 
10917c478bd9Sstevel@tonic-gate 	return (rp);
10927c478bd9Sstevel@tonic-gate }
10937c478bd9Sstevel@tonic-gate 
10947c478bd9Sstevel@tonic-gate /*
10957c478bd9Sstevel@tonic-gate  * Return 1 if there is a active vnode belonging to this vfs in the
10967c478bd9Sstevel@tonic-gate  * rtable4 cache.
10977c478bd9Sstevel@tonic-gate  *
10987c478bd9Sstevel@tonic-gate  * Several of these checks are done without holding the usual
10997c478bd9Sstevel@tonic-gate  * locks.  This is safe because destroy_rtable(), rp_addfree(),
11007c478bd9Sstevel@tonic-gate  * etc. will redo the necessary checks before actually destroying
11017c478bd9Sstevel@tonic-gate  * any rnodes.
11027c478bd9Sstevel@tonic-gate  */
11037c478bd9Sstevel@tonic-gate int
11047c478bd9Sstevel@tonic-gate check_rtable4(struct vfs *vfsp)
11057c478bd9Sstevel@tonic-gate {
11067c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
11077c478bd9Sstevel@tonic-gate 	vnode_t *vp;
11087c478bd9Sstevel@tonic-gate 	char *busy = NULL;
11097c478bd9Sstevel@tonic-gate 	int index;
11107c478bd9Sstevel@tonic-gate 
11117c478bd9Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
11127c478bd9Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_READER);
11137c478bd9Sstevel@tonic-gate 
11147c478bd9Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
11157c478bd9Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
11167c478bd9Sstevel@tonic-gate 		    rp = rp->r_hashf) {
11177c478bd9Sstevel@tonic-gate 
11187c478bd9Sstevel@tonic-gate 			vp = RTOV4(rp);
11197c478bd9Sstevel@tonic-gate 			if (vp->v_vfsp == vfsp) {
11207c478bd9Sstevel@tonic-gate 				if (rp->r_freef == NULL) {
11217c478bd9Sstevel@tonic-gate 					busy = "not on free list";
11227c478bd9Sstevel@tonic-gate 				} else if (nfs4_has_pages(vp) &&
1123b9238976Sth 				    (rp->r_flags & R4DIRTY)) {
11247c478bd9Sstevel@tonic-gate 					busy = "dirty pages";
11257c478bd9Sstevel@tonic-gate 				} else if (rp->r_count > 0) {
11267c478bd9Sstevel@tonic-gate 					busy = "r_count > 0";
11277c478bd9Sstevel@tonic-gate 				}
11287c478bd9Sstevel@tonic-gate 
11297c478bd9Sstevel@tonic-gate 				if (busy != NULL) {
11307c478bd9Sstevel@tonic-gate #ifdef DEBUG
11317c478bd9Sstevel@tonic-gate 					char *path;
11327c478bd9Sstevel@tonic-gate 
11337c478bd9Sstevel@tonic-gate 					path = fn_path(rp->r_svnode.sv_name);
11347c478bd9Sstevel@tonic-gate 					NFS4_DEBUG(nfs4_rnode_debug,
11357c478bd9Sstevel@tonic-gate 					    (CE_NOTE, "check_rtable4: " "%s %s",
11367c478bd9Sstevel@tonic-gate 					    path, busy));
11377c478bd9Sstevel@tonic-gate 					kmem_free(path, strlen(path)+1);
11387c478bd9Sstevel@tonic-gate #endif
11397c478bd9Sstevel@tonic-gate 					rw_exit(&rtable4[index].r_lock);
11407c478bd9Sstevel@tonic-gate 					return (1);
11417c478bd9Sstevel@tonic-gate 				}
11427c478bd9Sstevel@tonic-gate 			}
11437c478bd9Sstevel@tonic-gate 		}
11447c478bd9Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
11457c478bd9Sstevel@tonic-gate 	}
11467c478bd9Sstevel@tonic-gate 	return (0);
11477c478bd9Sstevel@tonic-gate }
11487c478bd9Sstevel@tonic-gate 
11497c478bd9Sstevel@tonic-gate /*
11507c478bd9Sstevel@tonic-gate  * Destroy inactive vnodes from the hash queues which
11517c478bd9Sstevel@tonic-gate  * belong to this vfs. All of the vnodes should be inactive.
1152b9238976Sth  * It is essential that we destroy all rnodes in case of
11537c478bd9Sstevel@tonic-gate  * forced unmount as well as in normal unmount case.
11547c478bd9Sstevel@tonic-gate  */
11557c478bd9Sstevel@tonic-gate 
11567c478bd9Sstevel@tonic-gate void
11577c478bd9Sstevel@tonic-gate destroy_rtable4(struct vfs *vfsp, cred_t *cr)
11587c478bd9Sstevel@tonic-gate {
11597c478bd9Sstevel@tonic-gate 	int index;
11607c478bd9Sstevel@tonic-gate 	vnode_t *vp;
11617c478bd9Sstevel@tonic-gate 	rnode4_t *rp, *r_hashf, *rlist;
11627c478bd9Sstevel@tonic-gate 
11637c478bd9Sstevel@tonic-gate 	rlist = NULL;
11647c478bd9Sstevel@tonic-gate 
11657c478bd9Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
11667c478bd9Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_WRITER);
11677c478bd9Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
11687c478bd9Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
11697c478bd9Sstevel@tonic-gate 		    rp = r_hashf) {
11707c478bd9Sstevel@tonic-gate 			/* save the hash pointer before destroying */
11717c478bd9Sstevel@tonic-gate 			r_hashf = rp->r_hashf;
11727c478bd9Sstevel@tonic-gate 
11737c478bd9Sstevel@tonic-gate 			vp = RTOV4(rp);
11747c478bd9Sstevel@tonic-gate 			if (vp->v_vfsp == vfsp) {
11757c478bd9Sstevel@tonic-gate 				mutex_enter(&rp4freelist_lock);
11767c478bd9Sstevel@tonic-gate 				if (rp->r_freef != NULL) {
11777c478bd9Sstevel@tonic-gate 					rp4_rmfree(rp);
11787c478bd9Sstevel@tonic-gate 					mutex_exit(&rp4freelist_lock);
11797c478bd9Sstevel@tonic-gate 					rp4_rmhash_locked(rp);
11807c478bd9Sstevel@tonic-gate 					rp->r_hashf = rlist;
11817c478bd9Sstevel@tonic-gate 					rlist = rp;
11827c478bd9Sstevel@tonic-gate 				} else
11837c478bd9Sstevel@tonic-gate 					mutex_exit(&rp4freelist_lock);
11847c478bd9Sstevel@tonic-gate 			}
11857c478bd9Sstevel@tonic-gate 		}
11867c478bd9Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
11877c478bd9Sstevel@tonic-gate 	}
11887c478bd9Sstevel@tonic-gate 
11897c478bd9Sstevel@tonic-gate 	for (rp = rlist; rp != NULL; rp = r_hashf) {
11907c478bd9Sstevel@tonic-gate 		r_hashf = rp->r_hashf;
11917c478bd9Sstevel@tonic-gate 		/*
11927c478bd9Sstevel@tonic-gate 		 * This call to rp4_addfree will end up destroying the
11937c478bd9Sstevel@tonic-gate 		 * rnode, but in a safe way with the appropriate set
11947c478bd9Sstevel@tonic-gate 		 * of checks done.
11957c478bd9Sstevel@tonic-gate 		 */
11967c478bd9Sstevel@tonic-gate 		rp4_addfree(rp, cr);
11977c478bd9Sstevel@tonic-gate 	}
11987c478bd9Sstevel@tonic-gate }
11997c478bd9Sstevel@tonic-gate 
12007c478bd9Sstevel@tonic-gate /*
12017c478bd9Sstevel@tonic-gate  * This routine destroys all the resources of an rnode
12027c478bd9Sstevel@tonic-gate  * and finally the rnode itself.
12037c478bd9Sstevel@tonic-gate  */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	/* any delegation must have been returned or discarded by now */
	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	/* one fewer rnode in existence; keep the global count in step */
	atomic_add_long((ulong_t *)&rnode4_new, -1);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	/*
	 * Invalidate the vnode before freeing it, then drop the vfs
	 * hold that accompanied this rnode (taken when the rnode was
	 * created elsewhere in this file).
	 */
	vn_invalid(vp);
	vn_free(vp);
	VFS_RELE(vfsp);
}
12257c478bd9Sstevel@tonic-gate 
12267c478bd9Sstevel@tonic-gate /*
12277c478bd9Sstevel@tonic-gate  * Invalidate the attributes on all rnodes forcing the next getattr
12287c478bd9Sstevel@tonic-gate  * to go over the wire.  Used to flush stale uid and gid mappings.
12297c478bd9Sstevel@tonic-gate  * Maybe done on a per vfsp, or all rnodes (vfsp == NULL)
12307c478bd9Sstevel@tonic-gate  */
void
nfs4_rnode_invalidate(struct vfs *vfsp)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp;

	/*
	 * Walk the hash queues looking for rnodes.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/* vfsp == NULL means "all filesystems" */
			if (vfsp != NULL && vp->v_vfsp != vfsp)
				continue;

			/*
			 * Best effort: use mutex_tryenter rather than
			 * blocking on a busy r_statelock while we hold
			 * the hash bucket lock; a skipped rnode simply
			 * keeps its cached attributes a little longer.
			 */
			if (!mutex_tryenter(&rp->r_statelock))
				continue;

			/*
			 * Expire the attributes by resetting the change
			 * and attr timeout.
			 */
			rp->r_change = 0;
			PURGE_ATTRCACHE4_LOCKED(rp);
			mutex_exit(&rp->r_statelock);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}
12647c478bd9Sstevel@tonic-gate 
12657c478bd9Sstevel@tonic-gate /*
12667c478bd9Sstevel@tonic-gate  * Flush all vnodes in this (or every) vfs.
12677c478bd9Sstevel@tonic-gate  * Used by nfs_sync and by nfs_unmount.
12687c478bd9Sstevel@tonic-gate  */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp, **vplist;
	long num, cnt;

	/*
	 * Check to see whether there is anything to do.
	 *
	 * rnode4_new is read without a lock; the snapshot is only
	 * advisory and sizes the vnode list allocated below.
	 */
	num = rnode4_new;
	if (num == 0)
		return;

	/*
	 * Allocate a slot for all currently active rnodes on the
	 * supposition that they all may need flushing.
	 */
	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
	cnt = 0;

	/*
	 * Walk the hash queues looking for rnodes with page
	 * lists associated with them.  Make a list of these
	 * files.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/*
			 * Don't bother sync'ing a vp if it
			 * is part of virtual swap device or
			 * if VFS is read-only
			 */
			if (IS_SWAPVP(vp) || vn_is_readonly(vp))
				continue;
			/*
			 * If flushing all mounted file systems or
			 * the vnode belongs to this vfs, has pages
			 * and is marked as either dirty or mmap'd,
			 * hold and add this vnode to the list of
			 * vnodes to flush.
			 */
			if ((vfsp == NULL || vp->v_vfsp == vfsp) &&
			    nfs4_has_pages(vp) &&
			    ((rp->r_flags & R4DIRTY) || rp->r_mapcnt > 0)) {
				VN_HOLD(vp);
				vplist[cnt++] = vp;
				if (cnt == num) {
					/*
					 * The list filled up: rnodes
					 * created after the snapshot of
					 * rnode4_new above simply won't
					 * be flushed this pass.
					 */
					rw_exit(&rtable4[index].r_lock);
					goto toomany;
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
toomany:

	/*
	 * Flush and release all of the files on the list.
	 */
	while (cnt-- > 0) {
		vp = vplist[cnt];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
		VN_RELE(vp);
	}

	/*
	 * Free the space allocated to hold the list.
	 */
	kmem_free(vplist, num * sizeof (*vplist));
}
13457c478bd9Sstevel@tonic-gate 
/*
 * Release the ancillary caches attached to an rnode: readdir cache,
 * symlink contents, cached ACL, xattr directory hold, and the access
 * cache.  Returns non-zero if anything was freed.
 */
int
nfs4_free_data_reclaim(rnode4_t *rp)
{
	char *contents;
	vnode_t *xattr;
	int size;
	vsecattr_t *vsp;
	int freed;
	bool_t rdc = FALSE;

	/*
	 * Free any held caches which may
	 * be associated with this rnode.
	 *
	 * Detach everything while holding r_statelock; the actual
	 * freeing is done after the lock is dropped.
	 */
	mutex_enter(&rp->r_statelock);
	if (rp->r_dir != NULL)
		rdc = TRUE;
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	freed = nfs4_access_purge_rp(rp);

	/* nothing else to release; report whether the access purge freed */
	if (rdc == FALSE && contents == NULL && vsp == NULL && xattr == NULL)
		return (freed);

	/*
	 * Free the readdir cache entries, incompletely if we can't block.
	 */
	nfs4_purge_rddir_cache(RTOV4(rp));

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/*
	 * Release the xattr directory vnode
	 */
	if (xattr != NULL)
		VN_RELE(xattr);

	return (1);
}
14077c478bd9Sstevel@tonic-gate 
/*
 * Like nfs4_free_data_reclaim(), but for rnodes that may be in active
 * use: never blocks on r_statelock (this is called while a hash bucket
 * lock is held by nfs4_active_reclaim()).  Returns non-zero if anything
 * was freed, zero if the rnode was busy or nothing was released.
 */
static int
nfs4_active_data_reclaim(rnode4_t *rp)
{
	char *contents;
	vnode_t *xattr;
	int size;
	vsecattr_t *vsp;
	int freed;
	bool_t rdc = FALSE;

	/*
	 * Free any held credentials and caches which
	 * may be associated with this rnode.
	 */
	if (!mutex_tryenter(&rp->r_statelock))
		return (0);
	/* detach everything under the lock; free it after dropping it */
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	if (rp->r_dir != NULL)
		rdc = TRUE;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	freed = nfs4_access_purge_rp(rp);

	/* nothing else to release; report whether the access purge freed */
	if (contents == NULL && vsp == NULL && rdc == FALSE && xattr == NULL)
		return (freed);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/* free the readdir cache entries, incompletely if we can't block */
	nfs4_purge_rddir_cache(RTOV4(rp));

	/*
	 * Release the xattr directory vnode
	 */
	if (xattr != NULL)
		VN_RELE(xattr);

	return (1);
}
14677c478bd9Sstevel@tonic-gate 
14687c478bd9Sstevel@tonic-gate static int
14697c478bd9Sstevel@tonic-gate nfs4_free_reclaim(void)
14707c478bd9Sstevel@tonic-gate {
14717c478bd9Sstevel@tonic-gate 	int freed;
14727c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
14737c478bd9Sstevel@tonic-gate 
14747c478bd9Sstevel@tonic-gate #ifdef DEBUG
14757c478bd9Sstevel@tonic-gate 	clstat4_debug.f_reclaim.value.ui64++;
14767c478bd9Sstevel@tonic-gate #endif
14777c478bd9Sstevel@tonic-gate 	freed = 0;
14787c478bd9Sstevel@tonic-gate 	mutex_enter(&rp4freelist_lock);
14797c478bd9Sstevel@tonic-gate 	rp = rp4freelist;
14807c478bd9Sstevel@tonic-gate 	if (rp != NULL) {
14817c478bd9Sstevel@tonic-gate 		do {
14827c478bd9Sstevel@tonic-gate 			if (nfs4_free_data_reclaim(rp))
14837c478bd9Sstevel@tonic-gate 				freed = 1;
14847c478bd9Sstevel@tonic-gate 		} while ((rp = rp->r_freef) != rp4freelist);
14857c478bd9Sstevel@tonic-gate 	}
14867c478bd9Sstevel@tonic-gate 	mutex_exit(&rp4freelist_lock);
14877c478bd9Sstevel@tonic-gate 	return (freed);
14887c478bd9Sstevel@tonic-gate }
14897c478bd9Sstevel@tonic-gate 
14907c478bd9Sstevel@tonic-gate static int
14917c478bd9Sstevel@tonic-gate nfs4_active_reclaim(void)
14927c478bd9Sstevel@tonic-gate {
14937c478bd9Sstevel@tonic-gate 	int freed;
14947c478bd9Sstevel@tonic-gate 	int index;
14957c478bd9Sstevel@tonic-gate 	rnode4_t *rp;
14967c478bd9Sstevel@tonic-gate 
14977c478bd9Sstevel@tonic-gate #ifdef DEBUG
14987c478bd9Sstevel@tonic-gate 	clstat4_debug.a_reclaim.value.ui64++;
14997c478bd9Sstevel@tonic-gate #endif
15007c478bd9Sstevel@tonic-gate 	freed = 0;
15017c478bd9Sstevel@tonic-gate 	for (index = 0; index < rtable4size; index++) {
15027c478bd9Sstevel@tonic-gate 		rw_enter(&rtable4[index].r_lock, RW_READER);
15037c478bd9Sstevel@tonic-gate 		for (rp = rtable4[index].r_hashf;
15047c478bd9Sstevel@tonic-gate 		    rp != (rnode4_t *)(&rtable4[index]);
15057c478bd9Sstevel@tonic-gate 		    rp = rp->r_hashf) {
15067c478bd9Sstevel@tonic-gate 			if (nfs4_active_data_reclaim(rp))
15077c478bd9Sstevel@tonic-gate 				freed = 1;
15087c478bd9Sstevel@tonic-gate 		}
15097c478bd9Sstevel@tonic-gate 		rw_exit(&rtable4[index].r_lock);
15107c478bd9Sstevel@tonic-gate 	}
15117c478bd9Sstevel@tonic-gate 	return (freed);
15127c478bd9Sstevel@tonic-gate }
15137c478bd9Sstevel@tonic-gate 
/*
 * Reclaim whole rnodes from the freelist, destroying them (and their
 * vnodes) to return memory to the system.
 *
 * NOTE(review): "freed" is initialized to 0 and never set, so this
 * function currently always returns 0; the only caller visible in this
 * file, nfs4_reclaim(), casts the result to void.
 */
static int
nfs4_rnode_reclaim(void)
{
	int freed;
	rnode4_t *rp;
	vnode_t *vp;

#ifdef DEBUG
	clstat4_debug.r_reclaim.value.ui64++;
#endif
	freed = 0;
	mutex_enter(&rp4freelist_lock);
	while ((rp = rp4freelist) != NULL) {
		rp4_rmfree(rp);
		/*
		 * Drop the freelist lock before taking the hash bucket
		 * lock; rp is already off the freelist so nobody else
		 * can reclaim it from there in the meantime.
		 */
		mutex_exit(&rp4freelist_lock);
		if (rp->r_flags & R4HASHED) {
			vp = RTOV4(rp);
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Someone else re-held the vnode while
				 * it was on the freelist: just drop the
				 * freelist's reference and leave the
				 * rnode alone.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				mutex_enter(&rp4freelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}
		/*
		 * This call to rp4_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, CRED());
		mutex_enter(&rp4freelist_lock);
	}
	mutex_exit(&rp4freelist_lock);
	return (freed);
}
15557c478bd9Sstevel@tonic-gate 
/*
 * Kmem cache reclaim callback.  Try the cheapest sources of memory
 * first: caches held by free rnodes, then caches held by active
 * rnodes, and only as a last resort tear down whole rnodes.
 */
/*ARGSUSED*/
static void
nfs4_reclaim(void *cdrarg)
{
#ifdef DEBUG
	clstat4_debug.reclaim.value.ui64++;
#endif
	if (!nfs4_free_reclaim() && !nfs4_active_reclaim())
		(void) nfs4_rnode_reclaim();
}
15717c478bd9Sstevel@tonic-gate 
15727c478bd9Sstevel@tonic-gate /*
15737c478bd9Sstevel@tonic-gate  * Returns the clientid4 to use for the given mntinfo4.  Note that the
15747c478bd9Sstevel@tonic-gate  * clientid can change if the caller drops mi_recovlock.
15757c478bd9Sstevel@tonic-gate  */
15767c478bd9Sstevel@tonic-gate 
15777c478bd9Sstevel@tonic-gate clientid4
15787c478bd9Sstevel@tonic-gate mi2clientid(mntinfo4_t *mi)
15797c478bd9Sstevel@tonic-gate {
15807c478bd9Sstevel@tonic-gate 	nfs4_server_t	*sp;
15817c478bd9Sstevel@tonic-gate 	clientid4	clientid = 0;
15827c478bd9Sstevel@tonic-gate 
15837c478bd9Sstevel@tonic-gate 	/* this locks down sp if it is found */
15847c478bd9Sstevel@tonic-gate 	sp = find_nfs4_server(mi);
15857c478bd9Sstevel@tonic-gate 	if (sp != NULL) {
15867c478bd9Sstevel@tonic-gate 		clientid = sp->clientid;
15877c478bd9Sstevel@tonic-gate 		mutex_exit(&sp->s_lock);
15887c478bd9Sstevel@tonic-gate 		nfs4_server_rele(sp);
15897c478bd9Sstevel@tonic-gate 	}
15907c478bd9Sstevel@tonic-gate 	return (clientid);
15917c478bd9Sstevel@tonic-gate }
15927c478bd9Sstevel@tonic-gate 
15937c478bd9Sstevel@tonic-gate /*
15947c478bd9Sstevel@tonic-gate  * Return the current lease time for the server associated with the given
15957c478bd9Sstevel@tonic-gate  * file.  Note that the lease time could change immediately after this
15967c478bd9Sstevel@tonic-gate  * call.
15977c478bd9Sstevel@tonic-gate  */
15987c478bd9Sstevel@tonic-gate 
15997c478bd9Sstevel@tonic-gate time_t
16007c478bd9Sstevel@tonic-gate r2lease_time(rnode4_t *rp)
16017c478bd9Sstevel@tonic-gate {
16027c478bd9Sstevel@tonic-gate 	nfs4_server_t	*sp;
16037c478bd9Sstevel@tonic-gate 	time_t		lease_time;
16047c478bd9Sstevel@tonic-gate 	mntinfo4_t	*mi = VTOMI4(RTOV4(rp));
16057c478bd9Sstevel@tonic-gate 
16067c478bd9Sstevel@tonic-gate 	(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);
16077c478bd9Sstevel@tonic-gate 
16087c478bd9Sstevel@tonic-gate 	/* this locks down sp if it is found */
16097c478bd9Sstevel@tonic-gate 	sp = find_nfs4_server(VTOMI4(RTOV4(rp)));
16107c478bd9Sstevel@tonic-gate 
16117c478bd9Sstevel@tonic-gate 	if (VTOMI4(RTOV4(rp))->mi_vfsp->vfs_flag & VFS_UNMOUNTED) {
16127c478bd9Sstevel@tonic-gate 		if (sp != NULL) {
16137c478bd9Sstevel@tonic-gate 			mutex_exit(&sp->s_lock);
16147c478bd9Sstevel@tonic-gate 			nfs4_server_rele(sp);
16157c478bd9Sstevel@tonic-gate 		}
16167c478bd9Sstevel@tonic-gate 		nfs_rw_exit(&mi->mi_recovlock);
16177c478bd9Sstevel@tonic-gate 		return (1);		/* 1 second */
16187c478bd9Sstevel@tonic-gate 	}
16197c478bd9Sstevel@tonic-gate 
16207c478bd9Sstevel@tonic-gate 	ASSERT(sp != NULL);
16217c478bd9Sstevel@tonic-gate 
16227c478bd9Sstevel@tonic-gate 	lease_time = sp->s_lease_time;
16237c478bd9Sstevel@tonic-gate 
16247c478bd9Sstevel@tonic-gate 	mutex_exit(&sp->s_lock);
16257c478bd9Sstevel@tonic-gate 	nfs4_server_rele(sp);
16267c478bd9Sstevel@tonic-gate 	nfs_rw_exit(&mi->mi_recovlock);
16277c478bd9Sstevel@tonic-gate 
16287c478bd9Sstevel@tonic-gate 	return (lease_time);
16297c478bd9Sstevel@tonic-gate }
16307c478bd9Sstevel@tonic-gate 
16317c478bd9Sstevel@tonic-gate /*
16327c478bd9Sstevel@tonic-gate  * Return a list with information about all the known open instances for
16337c478bd9Sstevel@tonic-gate  * a filesystem. The caller must call r4releopenlist() when done with the
16347c478bd9Sstevel@tonic-gate  * list.
16357c478bd9Sstevel@tonic-gate  *
16367c478bd9Sstevel@tonic-gate  * We are safe at looking at os_valid and os_pending_close across dropping
16377c478bd9Sstevel@tonic-gate  * the 'os_sync_lock' to count up the number of open streams and then
16387c478bd9Sstevel@tonic-gate  * allocate memory for the osp list due to:
16397c478bd9Sstevel@tonic-gate  *	-Looking at os_pending_close is safe since this routine is
16407c478bd9Sstevel@tonic-gate  *	only called via recovery, and os_pending_close can only be set via
16417c478bd9Sstevel@tonic-gate  *	a non-recovery operation (which are all blocked when recovery
16427c478bd9Sstevel@tonic-gate  *	is active).
16437c478bd9Sstevel@tonic-gate  *
16447c478bd9Sstevel@tonic-gate  *	-Examining os_valid is safe since non-recovery operations, which
16457c478bd9Sstevel@tonic-gate  *	could potentially switch os_valid to 0, are blocked (via
16467c478bd9Sstevel@tonic-gate  *	nfs4_start_fop) and recovery is single-threaded per mntinfo4_t
16477c478bd9Sstevel@tonic-gate  *	(which means we are the only recovery thread potentially acting
16487c478bd9Sstevel@tonic-gate  *	on this open stream).
16497c478bd9Sstevel@tonic-gate  */
16507c478bd9Sstevel@tonic-gate 
nfs4_opinst_t *
r4mkopenlist(mntinfo4_t *mi)
{
	nfs4_opinst_t *reopenlist, *rep;
	rnode4_t *rp;
	vnode_t *vp;
	vfs_t *vfsp = mi->mi_vfsp;
	int numosp;
	nfs4_open_stream_t *osp;
	int index;
	open_delegation_type4 dtype;
	int hold_vnode;

	reopenlist = NULL;

	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			/* only files that belong to this filesystem */
			if (vp->v_vfsp != vfsp)
				continue;
			hold_vnode = 0;

			mutex_enter(&rp->r_os_lock);

			/* Count the number of valid open_streams of the file */
			numosp = 0;
			for (osp = list_head(&rp->r_open_streams); osp != NULL;
			    osp = list_next(&rp->r_open_streams, osp)) {
				mutex_enter(&osp->os_sync_lock);
				if (osp->os_valid && !osp->os_pending_close)
					numosp++;
				mutex_exit(&osp->os_sync_lock);
			}

			/* Fill in the valid open streams per vp */
			if (numosp > 0) {
				int j;

				hold_vnode = 1;

				/*
				 * Add a new open instance to the list
				 * (prepended, so the list is in no
				 * particular order).
				 */
				rep = kmem_zalloc(sizeof (*reopenlist),
				    KM_SLEEP);
				rep->re_next = reopenlist;
				reopenlist = rep;

				rep->re_vp = vp;
				rep->re_osp = kmem_zalloc(
				    numosp * sizeof (*(rep->re_osp)),
				    KM_SLEEP);
				rep->re_numosp = numosp;

				/*
				 * Second walk: take a reference on each
				 * valid open stream and record it.  See
				 * the block comment above the function
				 * for why the count can't change between
				 * the two walks.
				 */
				j = 0;
				for (osp = list_head(&rp->r_open_streams);
				    osp != NULL;
				    osp = list_next(&rp->r_open_streams, osp)) {

					mutex_enter(&osp->os_sync_lock);
					if (osp->os_valid &&
					    !osp->os_pending_close) {
						osp->os_ref_count++;
						rep->re_osp[j] = osp;
						j++;
					}
					mutex_exit(&osp->os_sync_lock);
				}
				/*
				 * Assuming valid osp(s) stays valid between
				 * the time obtaining j and numosp.
				 */
				ASSERT(j == numosp);
			}

			mutex_exit(&rp->r_os_lock);
			/* do this here to keep v_lock > r_os_lock */
			if (hold_vnode)
				VN_HOLD(vp);
			mutex_enter(&rp->r_statev4_lock);
			if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
				/*
				 * If this rnode holds a delegation,
				 * but if there are no valid open streams,
				 * then just discard the delegation
				 * without doing delegreturn.
				 */
				if (numosp > 0)
					rp->r_deleg_needs_recovery =
					    rp->r_deleg_type;
			}
			/* Save the delegation type for use outside the lock */
			dtype = rp->r_deleg_type;
			mutex_exit(&rp->r_statev4_lock);

			/*
			 * If we have a delegation then get rid of it.
			 * We've set rp->r_deleg_needs_recovery so we have
			 * enough information to recover.
			 */
			if (dtype != OPEN_DELEGATE_NONE) {
				(void) nfs4delegreturn(rp, NFS4_DR_DISCARD);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (reopenlist);
}
17637c478bd9Sstevel@tonic-gate 
17647c478bd9Sstevel@tonic-gate /*
17657c478bd9Sstevel@tonic-gate  * Release the list of open instance references.
17667c478bd9Sstevel@tonic-gate  */
17677c478bd9Sstevel@tonic-gate 
17687c478bd9Sstevel@tonic-gate void
17697c478bd9Sstevel@tonic-gate r4releopenlist(nfs4_opinst_t *reopenp)
17707c478bd9Sstevel@tonic-gate {
17717c478bd9Sstevel@tonic-gate 	nfs4_opinst_t *rep, *next;
17727c478bd9Sstevel@tonic-gate 	int i;
17737c478bd9Sstevel@tonic-gate 
17747c478bd9Sstevel@tonic-gate 	for (rep = reopenp; rep; rep = next) {
17757c478bd9Sstevel@tonic-gate 		next = rep->re_next;
17767c478bd9Sstevel@tonic-gate 
17777c478bd9Sstevel@tonic-gate 		for (i = 0; i < rep->re_numosp; i++)
1778b9238976Sth 			open_stream_rele(rep->re_osp[i], VTOR4(rep->re_vp));
17797c478bd9Sstevel@tonic-gate 
17807c478bd9Sstevel@tonic-gate 		VN_RELE(rep->re_vp);
17817c478bd9Sstevel@tonic-gate 		kmem_free(rep->re_osp,
17827c478bd9Sstevel@tonic-gate 		    rep->re_numosp * sizeof (*(rep->re_osp)));
17837c478bd9Sstevel@tonic-gate 
17847c478bd9Sstevel@tonic-gate 		kmem_free(rep, sizeof (*rep));
17857c478bd9Sstevel@tonic-gate 	}
17867c478bd9Sstevel@tonic-gate }
17877c478bd9Sstevel@tonic-gate 
17887c478bd9Sstevel@tonic-gate int
17897c478bd9Sstevel@tonic-gate nfs4_rnode_init(void)
17907c478bd9Sstevel@tonic-gate {
17917c478bd9Sstevel@tonic-gate 	ulong_t nrnode4_max;
17927c478bd9Sstevel@tonic-gate 	int i;
17937c478bd9Sstevel@tonic-gate 
17947c478bd9Sstevel@tonic-gate 	/*
17957c478bd9Sstevel@tonic-gate 	 * Compute the size of the rnode4 hash table
17967c478bd9Sstevel@tonic-gate 	 */
17977c478bd9Sstevel@tonic-gate 	if (nrnode <= 0)
17987c478bd9Sstevel@tonic-gate 		nrnode = ncsize;
17997c478bd9Sstevel@tonic-gate 	nrnode4_max =
18007c478bd9Sstevel@tonic-gate 	    (ulong_t)((kmem_maxavail() >> 2) / sizeof (struct rnode4));
18017c478bd9Sstevel@tonic-gate 	if (nrnode > nrnode4_max || (nrnode == 0 && ncsize == 0)) {
18027c478bd9Sstevel@tonic-gate 		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
18037c478bd9Sstevel@tonic-gate 		    "setting nrnode to max value of %ld", nrnode4_max);
18047c478bd9Sstevel@tonic-gate 		nrnode = nrnode4_max;
18057c478bd9Sstevel@tonic-gate 	}
18067c478bd9Sstevel@tonic-gate 	rtable4size = 1 << highbit(nrnode / rnode4_hashlen);
18077c478bd9Sstevel@tonic-gate 	rtable4mask = rtable4size - 1;
18087c478bd9Sstevel@tonic-gate 
18097c478bd9Sstevel@tonic-gate 	/*
18107c478bd9Sstevel@tonic-gate 	 * Allocate and initialize the hash buckets
18117c478bd9Sstevel@tonic-gate 	 */
18127c478bd9Sstevel@tonic-gate 	rtable4 = kmem_alloc(rtable4size * sizeof (*rtable4), KM_SLEEP);
18137c478bd9Sstevel@tonic-gate 	for (i = 0; i < rtable4size; i++) {
18147c478bd9Sstevel@tonic-gate 		rtable4[i].r_hashf = (rnode4_t *)(&rtable4[i]);
18157c478bd9Sstevel@tonic-gate 		rtable4[i].r_hashb = (rnode4_t *)(&rtable4[i]);
18167c478bd9Sstevel@tonic-gate 		rw_init(&rtable4[i].r_lock, NULL, RW_DEFAULT, NULL);
18177c478bd9Sstevel@tonic-gate 	}
18187c478bd9Sstevel@tonic-gate 
18197c478bd9Sstevel@tonic-gate 	rnode4_cache = kmem_cache_create("rnode4_cache", sizeof (rnode4_t),
18207c478bd9Sstevel@tonic-gate 	    0, NULL, NULL, nfs4_reclaim, NULL, NULL, 0);
18217c478bd9Sstevel@tonic-gate 
18227c478bd9Sstevel@tonic-gate 	return (0);
18237c478bd9Sstevel@tonic-gate }
18247c478bd9Sstevel@tonic-gate 
18257c478bd9Sstevel@tonic-gate int
18267c478bd9Sstevel@tonic-gate nfs4_rnode_fini(void)
18277c478bd9Sstevel@tonic-gate {
18287c478bd9Sstevel@tonic-gate 	int i;
18297c478bd9Sstevel@tonic-gate 
18307c478bd9Sstevel@tonic-gate 	/*
18317c478bd9Sstevel@tonic-gate 	 * Deallocate the rnode hash queues
18327c478bd9Sstevel@tonic-gate 	 */
18337c478bd9Sstevel@tonic-gate 	kmem_cache_destroy(rnode4_cache);
18347c478bd9Sstevel@tonic-gate 
18357c478bd9Sstevel@tonic-gate 	for (i = 0; i < rtable4size; i++)
18367c478bd9Sstevel@tonic-gate 		rw_destroy(&rtable4[i].r_lock);
18377c478bd9Sstevel@tonic-gate 
18387c478bd9Sstevel@tonic-gate 	kmem_free(rtable4, rtable4size * sizeof (*rtable4));
18397c478bd9Sstevel@tonic-gate 
18407c478bd9Sstevel@tonic-gate 	return (0);
18417c478bd9Sstevel@tonic-gate }
18427c478bd9Sstevel@tonic-gate 
18437c478bd9Sstevel@tonic-gate /*
18447c478bd9Sstevel@tonic-gate  * Return non-zero if the given filehandle refers to the root filehandle
18457c478bd9Sstevel@tonic-gate  * for the given rnode.
18467c478bd9Sstevel@tonic-gate  */
18477c478bd9Sstevel@tonic-gate 
18487c478bd9Sstevel@tonic-gate static int
18497c478bd9Sstevel@tonic-gate isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp)
18507c478bd9Sstevel@tonic-gate {
18517c478bd9Sstevel@tonic-gate 	int isroot;
18527c478bd9Sstevel@tonic-gate 
18537c478bd9Sstevel@tonic-gate 	isroot = 0;
18547c478bd9Sstevel@tonic-gate 	if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh))
18557c478bd9Sstevel@tonic-gate 		isroot = 1;
18567c478bd9Sstevel@tonic-gate 
18577c478bd9Sstevel@tonic-gate 	return (isroot);
18587c478bd9Sstevel@tonic-gate }
18597c478bd9Sstevel@tonic-gate 
1860b9238976Sth /*
1861b9238976Sth  * The r4_stub_* routines assume that the rnode is newly activated, and
1862b9238976Sth  * that the caller either holds the hash bucket r_lock for this rnode as
1863b9238976Sth  * RW_WRITER, or holds r_statelock.
1864b9238976Sth  */
1865b9238976Sth static void
1866b9238976Sth r4_stub_set(rnode4_t *rp, nfs4_stub_type_t type)
1867b9238976Sth {
1868b9238976Sth 	vnode_t *vp = RTOV4(rp);
1869b9238976Sth 	krwlock_t *hash_lock = &rp->r_hashq->r_lock;
1870b9238976Sth 
1871b9238976Sth 	ASSERT(RW_WRITE_HELD(hash_lock) || MUTEX_HELD(&rp->r_statelock));
1872b9238976Sth 
1873b9238976Sth 	rp->r_stub_type = type;
1874b9238976Sth 
1875b9238976Sth 	/*
1876b9238976Sth 	 * Safely switch this vnode to the trigger vnodeops.
1877b9238976Sth 	 *
1878b9238976Sth 	 * Currently, we don't ever switch a trigger vnode back to using
1879b9238976Sth 	 * "regular" v4 vnodeops. NFS4_STUB_NONE is only used to note that
1880b9238976Sth 	 * a new v4 object is not a trigger, and it will already have the
1881b9238976Sth 	 * correct v4 vnodeops by default. So, no "else" case required here.
1882b9238976Sth 	 */
1883b9238976Sth 	if (type != NFS4_STUB_NONE)
1884b9238976Sth 		vn_setops(vp, nfs4_trigger_vnodeops);
1885b9238976Sth }
1886b9238976Sth 
/*
 * Mark a newly activated rnode as a mirror-mount stub; r4_stub_set()
 * switches its vnode to the trigger vnodeops.  Locking requirements
 * are those of r4_stub_set().
 */
void
r4_stub_mirrormount(rnode4_t *rp)
{
	r4_stub_set(rp, NFS4_STUB_MIRRORMOUNT);
}
1892b9238976Sth 
/*
 * Note that a newly activated rnode is not a stub; its vnode keeps the
 * regular v4 vnodeops.  Locking requirements are those of
 * r4_stub_set().
 */
void
r4_stub_none(rnode4_t *rp)
{
	r4_stub_set(rp, NFS4_STUB_NONE);
}
1898b9238976Sth 
18997c478bd9Sstevel@tonic-gate #ifdef DEBUG
19007c478bd9Sstevel@tonic-gate 
19017c478bd9Sstevel@tonic-gate /*
19027c478bd9Sstevel@tonic-gate  * Look in the rnode table for other rnodes that have the same filehandle.
19037c478bd9Sstevel@tonic-gate  * Assume the lock is held for the hash chain of checkrp
19047c478bd9Sstevel@tonic-gate  */
19057c478bd9Sstevel@tonic-gate 
/*
 * DEBUG consistency check: panic if any other rnode in the table has
 * the same filesystem and filehandle as checkrp.  Enabled only when
 * the r4_check_for_dups tunable is set.  The caller must already hold
 * the r_lock for checkrp's own hash bucket.
 */
static void
r4_dup_check(rnode4_t *checkrp, vfs_t *vfsp)
{
	rnode4_t *rp;
	vnode_t *tvp;
	nfs4_fhandle_t fh, fh2;
	int index;

	if (!r4_check_for_dups)
		return;

	ASSERT(RW_LOCK_HELD(&checkrp->r_hashq->r_lock));

	sfh4_copyval(checkrp->r_fh, &fh);

	/* Walk every bucket of the rnode4 hash table. */
	for (index = 0; index < rtable4size; index++) {

		/*
		 * The caller already holds the lock for checkrp's own
		 * bucket, so only acquire the locks of other buckets.
		 */
		if (&rtable4[index] != checkrp->r_hashq)
			rw_enter(&rtable4[index].r_lock, RW_READER);

		/* The bucket header itself terminates the circular list. */
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			if (rp == checkrp)
				continue;

			/* Only rnodes in the same filesystem can be dups. */
			tvp = RTOV4(rp);
			if (tvp->v_vfsp != vfsp)
				continue;

			sfh4_copyval(rp->r_fh, &fh2);
			if (nfs4cmpfhandle(&fh, &fh2) == 0) {
				cmn_err(CE_PANIC, "rnodes with same fs, fh "
				    "(%p, %p)", (void *)checkrp, (void *)rp);
			}
		}

		if (&rtable4[index] != checkrp->r_hashq)
			rw_exit(&rtable4[index].r_lock);
	}
}
19487c478bd9Sstevel@tonic-gate 
19497c478bd9Sstevel@tonic-gate #endif /* DEBUG */
1950