17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 59f9e2373Sjwahlig * Common Development and Distribution License (the "License"). 69f9e2373Sjwahlig * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 2272dd5e52SMarcel Telka * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate /* 277c478bd9Sstevel@tonic-gate * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 287c478bd9Sstevel@tonic-gate * All Rights Reserved 297c478bd9Sstevel@tonic-gate */ 307c478bd9Sstevel@tonic-gate 31f5654033SAlexander Eremin /* 32f5654033SAlexander Eremin * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 
33f5654033SAlexander Eremin */ 347c478bd9Sstevel@tonic-gate 357c478bd9Sstevel@tonic-gate #include <sys/param.h> 367c478bd9Sstevel@tonic-gate #include <sys/types.h> 377c478bd9Sstevel@tonic-gate #include <sys/systm.h> 387c478bd9Sstevel@tonic-gate #include <sys/cred.h> 397c478bd9Sstevel@tonic-gate #include <sys/proc.h> 407c478bd9Sstevel@tonic-gate #include <sys/user.h> 417c478bd9Sstevel@tonic-gate #include <sys/time.h> 427c478bd9Sstevel@tonic-gate #include <sys/buf.h> 437c478bd9Sstevel@tonic-gate #include <sys/vfs.h> 447c478bd9Sstevel@tonic-gate #include <sys/vnode.h> 457c478bd9Sstevel@tonic-gate #include <sys/socket.h> 467c478bd9Sstevel@tonic-gate #include <sys/uio.h> 477c478bd9Sstevel@tonic-gate #include <sys/tiuser.h> 487c478bd9Sstevel@tonic-gate #include <sys/swap.h> 497c478bd9Sstevel@tonic-gate #include <sys/errno.h> 507c478bd9Sstevel@tonic-gate #include <sys/debug.h> 517c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 527c478bd9Sstevel@tonic-gate #include <sys/kstat.h> 537c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 547c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 557c478bd9Sstevel@tonic-gate #include <sys/session.h> 567c478bd9Sstevel@tonic-gate #include <sys/dnlc.h> 577c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 587c478bd9Sstevel@tonic-gate #include <sys/acl.h> 597c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 607c478bd9Sstevel@tonic-gate #include <sys/pathname.h> 617c478bd9Sstevel@tonic-gate #include <sys/flock.h> 627c478bd9Sstevel@tonic-gate #include <sys/dirent.h> 637c478bd9Sstevel@tonic-gate #include <sys/flock.h> 647c478bd9Sstevel@tonic-gate #include <sys/callb.h> 656962f5b8SThomas Haynes #include <sys/sdt.h> 667c478bd9Sstevel@tonic-gate 67f8bbc571SPavel Filipensky #include <vm/pvn.h> 68f8bbc571SPavel Filipensky 697c478bd9Sstevel@tonic-gate #include <rpc/types.h> 707c478bd9Sstevel@tonic-gate #include <rpc/xdr.h> 717c478bd9Sstevel@tonic-gate #include <rpc/auth.h> 727c478bd9Sstevel@tonic-gate #include <rpc/rpcsec_gss.h> 
737c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 747c478bd9Sstevel@tonic-gate 757c478bd9Sstevel@tonic-gate #include <nfs/nfs.h> 767c478bd9Sstevel@tonic-gate #include <nfs/nfs_clnt.h> 777c478bd9Sstevel@tonic-gate #include <nfs/nfs_acl.h> 787c478bd9Sstevel@tonic-gate 797c478bd9Sstevel@tonic-gate #include <nfs/nfs4.h> 807c478bd9Sstevel@tonic-gate #include <nfs/rnode4.h> 817c478bd9Sstevel@tonic-gate #include <nfs/nfs4_clnt.h> 827c478bd9Sstevel@tonic-gate 837c478bd9Sstevel@tonic-gate /* 847c478bd9Sstevel@tonic-gate * The hash queues for the access to active and cached rnodes 857c478bd9Sstevel@tonic-gate * are organized as doubly linked lists. A reader/writer lock 867c478bd9Sstevel@tonic-gate * for each hash bucket is used to control access and to synchronize 877c478bd9Sstevel@tonic-gate * lookups, additions, and deletions from the hash queue. 887c478bd9Sstevel@tonic-gate * 897c478bd9Sstevel@tonic-gate * The rnode freelist is organized as a doubly linked list with 907c478bd9Sstevel@tonic-gate * a head pointer. Additions and deletions are synchronized via 917c478bd9Sstevel@tonic-gate * a single mutex. 927c478bd9Sstevel@tonic-gate * 937c478bd9Sstevel@tonic-gate * In order to add an rnode to the free list, it must be hashed into 947c478bd9Sstevel@tonic-gate * a hash queue and the exclusive lock to the hash queue be held. 957c478bd9Sstevel@tonic-gate * If an rnode is not hashed into a hash queue, then it is destroyed 967c478bd9Sstevel@tonic-gate * because it represents no valuable information that can be reused 977c478bd9Sstevel@tonic-gate * about the file. The exclusive lock to the hash queue must be 987c478bd9Sstevel@tonic-gate * held in order to prevent a lookup in the hash queue from finding 997c478bd9Sstevel@tonic-gate * the rnode and using it and assuming that the rnode is not on the 1007c478bd9Sstevel@tonic-gate * freelist. The lookup in the hash queue will have the hash queue 1017c478bd9Sstevel@tonic-gate * locked, either exclusive or shared. 
1027c478bd9Sstevel@tonic-gate * 1037c478bd9Sstevel@tonic-gate * The vnode reference count for each rnode is not allowed to drop 1047c478bd9Sstevel@tonic-gate * below 1. This prevents external entities, such as the VM 1057c478bd9Sstevel@tonic-gate * subsystem, from acquiring references to vnodes already on the 1067c478bd9Sstevel@tonic-gate * freelist and then trying to place them back on the freelist 1077c478bd9Sstevel@tonic-gate * when their reference is released. This means that the when an 1087c478bd9Sstevel@tonic-gate * rnode is looked up in the hash queues, then either the rnode 109da6c28aaSamw * is removed from the freelist and that reference is transferred to 1107c478bd9Sstevel@tonic-gate * the new reference or the vnode reference count must be incremented 1117c478bd9Sstevel@tonic-gate * accordingly. The mutex for the freelist must be held in order to 1127c478bd9Sstevel@tonic-gate * accurately test to see if the rnode is on the freelist or not. 1137c478bd9Sstevel@tonic-gate * The hash queue lock might be held shared and it is possible that 1147c478bd9Sstevel@tonic-gate * two different threads may race to remove the rnode from the 1157c478bd9Sstevel@tonic-gate * freelist. This race can be resolved by holding the mutex for the 1167c478bd9Sstevel@tonic-gate * freelist. Please note that the mutex for the freelist does not 1177c478bd9Sstevel@tonic-gate * need to be held if the rnode is not on the freelist. It can not be 1187c478bd9Sstevel@tonic-gate * placed on the freelist due to the requirement that the thread 1197c478bd9Sstevel@tonic-gate * putting the rnode on the freelist must hold the exclusive lock 1207c478bd9Sstevel@tonic-gate * to the hash queue and the thread doing the lookup in the hash 1217c478bd9Sstevel@tonic-gate * queue is holding either a shared or exclusive lock to the hash 1227c478bd9Sstevel@tonic-gate * queue. 
1237c478bd9Sstevel@tonic-gate * 1247c478bd9Sstevel@tonic-gate * The lock ordering is: 1257c478bd9Sstevel@tonic-gate * 1267c478bd9Sstevel@tonic-gate * hash bucket lock -> vnode lock 1272d1fef97Ssamf * hash bucket lock -> freelist lock -> r_statelock 1287c478bd9Sstevel@tonic-gate */ 1297c478bd9Sstevel@tonic-gate r4hashq_t *rtable4; 1307c478bd9Sstevel@tonic-gate 1317c478bd9Sstevel@tonic-gate static kmutex_t rp4freelist_lock; 1327c478bd9Sstevel@tonic-gate static rnode4_t *rp4freelist = NULL; 1337c478bd9Sstevel@tonic-gate static long rnode4_new = 0; 1347c478bd9Sstevel@tonic-gate int rtable4size; 1357c478bd9Sstevel@tonic-gate static int rtable4mask; 1367c478bd9Sstevel@tonic-gate static struct kmem_cache *rnode4_cache; 1377c478bd9Sstevel@tonic-gate static int rnode4_hashlen = 4; 1387c478bd9Sstevel@tonic-gate 1397c478bd9Sstevel@tonic-gate static void r4inactive(rnode4_t *, cred_t *); 1407c478bd9Sstevel@tonic-gate static vnode_t *make_rnode4(nfs4_sharedfh_t *, r4hashq_t *, struct vfs *, 1417c478bd9Sstevel@tonic-gate struct vnodeops *, 1427c478bd9Sstevel@tonic-gate int (*)(vnode_t *, page_t *, u_offset_t *, size_t *, int, 1437c478bd9Sstevel@tonic-gate cred_t *), 1447c478bd9Sstevel@tonic-gate int *, cred_t *); 1457c478bd9Sstevel@tonic-gate static void rp4_rmfree(rnode4_t *); 1467c478bd9Sstevel@tonic-gate int nfs4_free_data_reclaim(rnode4_t *); 1477c478bd9Sstevel@tonic-gate static int nfs4_active_data_reclaim(rnode4_t *); 1487c478bd9Sstevel@tonic-gate static int nfs4_free_reclaim(void); 1497c478bd9Sstevel@tonic-gate static int nfs4_active_reclaim(void); 1507c478bd9Sstevel@tonic-gate static int nfs4_rnode_reclaim(void); 1517c478bd9Sstevel@tonic-gate static void nfs4_reclaim(void *); 1527c478bd9Sstevel@tonic-gate static int isrootfh(nfs4_sharedfh_t *, rnode4_t *); 1537c478bd9Sstevel@tonic-gate static void uninit_rnode4(rnode4_t *); 1547c478bd9Sstevel@tonic-gate static void destroy_rnode4(rnode4_t *); 155b9238976Sth static void r4_stub_set(rnode4_t *, nfs4_stub_type_t); 

#ifdef DEBUG
static int r4_check_for_dups = 0; /* Flag to enable dup rnode detection. */
static int nfs4_rnode_debug = 0;
/* if nonzero, kmem_cache_free() rnodes rather than place on freelist */
static int nfs4_rnode_nofreelist = 0;
/* give messages on colliding shared filehandles */
static void r4_dup_check(rnode4_t *, vfs_t *);
#endif

/*
 * If the vnode has pages, run the list and check for any that are
 * still dangling. We call this routine before putting an rnode on
 * the free list.
 *
 * Returns 1 if any page on vp's page list is neither tagged
 * PVN_VPLIST_HASH_TAG nor marked C_NOCOMMIT (i.e. a "dross" page
 * is still present); returns 0 otherwise.  The circular v_pages
 * list is walked while holding the per-vnode page mutex.
 */
static int
nfs4_dross_pages(vnode_t *vp)
{
	page_t *pp;
	kmutex_t *vphm;

	vphm = page_vnode_mutex(vp);
	mutex_enter(vphm);
	if ((pp = vp->v_pages) != NULL) {
		do {
			if (pp->p_hash != PVN_VPLIST_HASH_TAG &&
			    pp->p_fsdata != C_NOCOMMIT) {
				/* found a dangling page; stop early */
				mutex_exit(vphm);
				return (1);
			}
		} while ((pp = pp->p_vpnext) != vp->v_pages);
	}
	mutex_exit(vphm);

	return (0);
}
/*
 * Flush any pages left on this rnode.
 *
 * Waits for all outstanding async i/o on the rnode (r_count drains
 * to zero under r_statelock), then pushes dirty pages to the server
 * and invalidates all pages for the vnode.  An ENOSPC/EDQUOT error
 * from the putpage is latched into rp->r_error (first error wins) so
 * a later caller can observe it.
 */
static void
r4flushpages(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	int error;

	/*
	 * Before freeing anything, wait until all asynchronous
	 * activity is done on this rnode. This will allow all
	 * asynchronous read ahead and write behind i/o's to
	 * finish.
	 */
	mutex_enter(&rp->r_statelock);
	while (rp->r_count > 0)
		cv_wait(&rp->r_cv, &rp->r_statelock);
	mutex_exit(&rp->r_statelock);

	/*
	 * Flush and invalidate all pages associated with the vnode.
	 */
	vp = RTOV4(rp);
	if (nfs4_has_pages(vp)) {
		ASSERT(vp->v_type != VCHR);
		if ((rp->r_flags & R4DIRTY) && !rp->r_error) {
			error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr, NULL);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				mutex_enter(&rp->r_statelock);
				/* record only the first such error */
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
		}
		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
	}
}
/*
 * Free the resources associated with an rnode.
 *
 * Flushes/invalidates pages first, then snapshots and NULLs the
 * cached symlink contents, ACL, and xattr directory under
 * r_statelock, and finally releases those snapshots outside the
 * lock.  Also purges the access and readdir caches.
 */
static void
r4inactive(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	char *contents;
	int size;
	vsecattr_t *vsp;
	vnode_t *xattr;

	r4flushpages(rp, cr);

	vp = RTOV4(rp);

	/*
	 * Free any held caches which may be
	 * associated with this rnode.
	 * Snapshot-and-clear under r_statelock; the actual frees
	 * happen below, after the lock is dropped.
	 */
	mutex_enter(&rp->r_statelock);
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	(void) nfs4_access_purge_rp(rp);

	/*
	 * Free the readdir cache entries.
	 */
	nfs4_purge_rddir_cache(vp);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/*
	 * Release the cached xattr_dir
	 */
	if (xattr != NULL)
		VN_RELE(xattr);
}

/*
 * We have seen a case that the fh passed in is for "." which
 * should be a VROOT node, however, the fh is different from the
 * root fh stored in the mntinfo4_t. The invalid fh might be
 * from a misbehaved server and will panic the client system at
 * a later time. To avoid the panic, we drop the bad fh, use
 * the root fh from mntinfo4_t, and print an error message
 * for attention.
 *
 * Returns either the fh passed in (normal case) or mi->mi_rootfh
 * (bad-fh case); *wasbad is set to 1 in the latter case so the
 * caller knows the original fh should still be released by it.
 * The fn_name() buffer is always freed before return.
 */
nfs4_sharedfh_t *
badrootfh_check(nfs4_sharedfh_t *fh, nfs4_fname_t *nm, mntinfo4_t *mi,
    int *wasbad)
{
	char *s;

	*wasbad = 0;
	s = fn_name(nm);
	ASSERT(strcmp(s, "..") != 0);

	if ((s[0] == '.' && s[1] == '\0') && fh &&
	    !SFH4_SAME(mi->mi_rootfh, fh)) {
#ifdef DEBUG
		nfs4_fhandle_t fhandle;

		zcmn_err(mi->mi_zone->zone_id, CE_WARN,
		    "Server %s returns a different "
		    "root filehandle for the path %s:",
		    mi->mi_curr_serv->sv_hostname,
		    mi->mi_curr_serv->sv_path);

		/* print the bad fh */
		fhandle.fh_len = fh->sfh_fh.nfs_fh4_len;
		bcopy(fh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);

		/* print mi_rootfh */
		fhandle.fh_len = mi->mi_rootfh->sfh_fh.nfs_fh4_len;
		bcopy(mi->mi_rootfh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);
#endif
		/* use mi_rootfh instead; fh will be rele by the caller */
		fh = mi->mi_rootfh;
		*wasbad = 1;
	}

	kmem_free(s, MAXNAMELEN);
	return (fh);
}
/*
 * Cache (or purge) the attributes in garp for vp.
 *
 * Called with the hash bucket lock rtable4[index].r_lock held; that
 * lock is ALWAYS dropped (rw_exit) on every path before this function
 * returns -- callers must not touch it afterwards.  For a newly
 * created node (newnode != 0) this also fixes up v_type/v_rdev and
 * decides whether the rnode is a mirror-mount "stub".
 */
void
r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
    hrtime_t t, cred_t *cr, int index)
{
	int is_stub;
	vattr_t *attr;
	/*
	 * Don't add to attrcache if time overflow, but
	 * no need to check because either attr is null or the time
	 * values in it were processed by nfs4_time_ntov(), which checks
	 * for time overflows.
	 */
	attr = garp ? &garp->n4g_va : NULL;

	if (attr) {
		if (!newnode) {
			/* existing node: drop the bucket lock first */
			rw_exit(&rtable4[index].r_lock);
#ifdef DEBUG
			if (vp->v_type != attr->va_type &&
			    vp->v_type != VNON && attr->va_type != VNON) {
				zcmn_err(VTOMI4(vp)->mi_zone->zone_id, CE_WARN,
				    "makenfs4node: type (%d) doesn't "
				    "match type of found node at %p (%d)",
				    attr->va_type, (void *)vp, vp->v_type);
			}
#endif
			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
		} else {
			rnode4_t *rp = VTOR4(vp);

			vp->v_type = attr->va_type;
			vp->v_rdev = attr->va_rdev;

			/*
			 * Turn this object into a "stub" object if we
			 * crossed an underlying server fs boundary.
			 * To make this check, during mount we save the
			 * fsid of the server object being mounted.
			 * Here we compare this object's server fsid
			 * with the fsid we saved at mount.  If they
			 * are different, we crossed server fs boundary.
			 *
			 * The stub type is set (or not) at rnode
			 * creation time and it never changes for life
			 * of the rnode.
			 *
			 * This stub will be for a mirror-mount, rather than
			 * a referral (the latter also sets R4SRVSTUB).
			 *
			 * The stub type is also set during RO failover,
			 * nfs4_remap_file().
			 *
			 * We don't bother with taking r_state_lock to
			 * set the stub type because this is a new rnode
			 * and we're holding the hash bucket r_lock RW_WRITER.
			 * No other thread could have obtained access
			 * to this rnode.
			 */
			is_stub = 0;
			if (garp->n4g_fsid_valid) {
				fattr4_fsid ga_fsid = garp->n4g_fsid;
				servinfo4_t *svp = rp->r_server;

				rp->r_srv_fsid = ga_fsid;

				(void) nfs_rw_enter_sig(&svp->sv_lock,
				    RW_READER, 0);
				if (!FATTR4_FSID_EQ(&ga_fsid, &svp->sv_fsid))
					is_stub = 1;
				nfs_rw_exit(&svp->sv_lock);
			}

			if (is_stub)
				r4_stub_mirrormount(rp);
			else
				r4_stub_none(rp);

			/* Can not cache partial attr */
			if (attr->va_mask == AT_ALL)
				nfs4_attrcache_noinval(vp, garp, t);
			else
				PURGE_ATTRCACHE4(vp);

			rw_exit(&rtable4[index].r_lock);
		}
	} else {
		/* no attributes supplied: just purge (new node) and unlock */
		if (newnode) {
			PURGE_ATTRCACHE4(vp);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Find or create an rnode based primarily on filehandle.  To be
 * used when dvp (vnode for parent directory) is not available;
 * otherwise, makenfs4node() should be used.
 *
 * The nfs4_fname_t argument *npp is consumed and nulled out.
 */

vnode_t *
makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
    nfs4_fname_t **npp, nfs4_ga_res_t *garp,
    mntinfo4_t *mi, cred_t *cr, hrtime_t t)
{
	vfs_t *vfsp = mi->mi_vfsp;
	int newnode = 0;
	vnode_t *vp;
	rnode4_t *rp;
	svnode_t *svp;
	nfs4_fname_t *name, *svpname;
	int index;

	ASSERT(npp && *npp);
	name = *npp;
	*npp = NULL;

	index = rtable4hash(sfh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	vp = make_rnode4(sfh, &rtable4[index], vfsp,
	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	if (newnode) {
		/* fresh shadow vnode: initialize its list and name */
		svp->sv_forw = svp->sv_back = svp;
		svp->sv_name = name;
		if (psfh != NULL)
			sfh4_hold(psfh);
		svp->sv_dfh = psfh;
	} else {
		/*
		 * It is possible that due to a server
		 * side rename fnames have changed.
		 * update the fname here.
		 */
		mutex_enter(&rp->r_svlock);
		svpname = svp->sv_name;
		if (svp->sv_name != name) {
			svp->sv_name = name;
			mutex_exit(&rp->r_svlock);
			fn_rele(&svpname);
		} else {
			mutex_exit(&rp->r_svlock);
			fn_rele(&name);
		}
	}

	/* r4_do_attrcache() drops the bucket lock on all paths */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}
/*
 * Find or create a vnode for the given filehandle, filesystem, parent, and
 * name.  The reference to nm is consumed, so the caller must first do an
 * fn_hold() if it wants to continue using nm after this call.
 *
 * If badrootfh_check() detected a bogus root filehandle, the
 * attributes are deliberately not cached (the bucket lock is simply
 * dropped and the vnode returned).
 */
vnode_t *
makenfs4node(nfs4_sharedfh_t *fh, nfs4_ga_res_t *garp, struct vfs *vfsp,
    hrtime_t t, cred_t *cr, vnode_t *dvp, nfs4_fname_t *nm)
{
	vnode_t *vp;
	int newnode;
	int index;
	mntinfo4_t *mi = VFTOMI4(vfsp);
	int had_badfh = 0;
	rnode4_t *rp;

	ASSERT(dvp != NULL);

	fh = badrootfh_check(fh, nm, mi, &had_badfh);

	index = rtable4hash(fh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	/*
	 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
	 */
	vp = make_rnode4(fh, &rtable4[index], vfsp, nfs4_vnodeops,
	    nfs4_putapage, &newnode, cr);

	rp = VTOR4(vp);
	sv_activate(&vp, dvp, &nm, newnode);
	if (dvp->v_flag & V_XATTRDIR) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4ISXATTR;
		mutex_exit(&rp->r_statelock);
	}

	/* if getting a bad file handle, do not cache the attributes. */
	if (had_badfh) {
		rw_exit(&rtable4[index].r_lock);
		return (vp);
	}

	/* r4_do_attrcache() drops the bucket lock on all paths */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}

/*
 * Hash on address of filehandle object.
 * XXX totally untuned.
 *
 * The shared-filehandle pointer itself (scaled by the object size)
 * is the hash key, masked into the table size.
 */

int
rtable4hash(nfs4_sharedfh_t *fh)
{
	return (((uintptr_t)fh / sizeof (*fh)) & rtable4mask);
}

/*
 * Find or create the vnode for the given filehandle and filesystem.
 * *newnode is set to zero if the vnode already existed; non-zero if it had
 * to be created.
 *
 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
 */
5687c478bd9Sstevel@tonic-gate */ 5697c478bd9Sstevel@tonic-gate 5707c478bd9Sstevel@tonic-gate static vnode_t * 5717c478bd9Sstevel@tonic-gate make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp, 5727c478bd9Sstevel@tonic-gate struct vnodeops *vops, 5737c478bd9Sstevel@tonic-gate int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *), 5747c478bd9Sstevel@tonic-gate int *newnode, cred_t *cr) 5757c478bd9Sstevel@tonic-gate { 5767c478bd9Sstevel@tonic-gate rnode4_t *rp; 5777c478bd9Sstevel@tonic-gate rnode4_t *trp; 5787c478bd9Sstevel@tonic-gate vnode_t *vp; 5797c478bd9Sstevel@tonic-gate mntinfo4_t *mi; 5807c478bd9Sstevel@tonic-gate 5817c478bd9Sstevel@tonic-gate ASSERT(RW_READ_HELD(&rhtp->r_lock)); 5827c478bd9Sstevel@tonic-gate 5837c478bd9Sstevel@tonic-gate mi = VFTOMI4(vfsp); 5847c478bd9Sstevel@tonic-gate 5857c478bd9Sstevel@tonic-gate start: 5867c478bd9Sstevel@tonic-gate if ((rp = r4find(rhtp, fh, vfsp)) != NULL) { 5877c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 5887c478bd9Sstevel@tonic-gate *newnode = 0; 5897c478bd9Sstevel@tonic-gate return (vp); 5907c478bd9Sstevel@tonic-gate } 5917c478bd9Sstevel@tonic-gate rw_exit(&rhtp->r_lock); 5927c478bd9Sstevel@tonic-gate 5937c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 5947c478bd9Sstevel@tonic-gate 5957c478bd9Sstevel@tonic-gate if (rp4freelist != NULL && rnode4_new >= nrnode) { 5967c478bd9Sstevel@tonic-gate rp = rp4freelist; 5977c478bd9Sstevel@tonic-gate rp4_rmfree(rp); 5987c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 5997c478bd9Sstevel@tonic-gate 6007c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 6017c478bd9Sstevel@tonic-gate 6027c478bd9Sstevel@tonic-gate if (rp->r_flags & R4HASHED) { 6037c478bd9Sstevel@tonic-gate rw_enter(&rp->r_hashq->r_lock, RW_WRITER); 6047c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 6057c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 6067c478bd9Sstevel@tonic-gate vp->v_count--; 6077c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 
6087c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 6097c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 6107c478bd9Sstevel@tonic-gate goto start; 6117c478bd9Sstevel@tonic-gate } 6127c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6137c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rp); 6147c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 6157c478bd9Sstevel@tonic-gate } 6167c478bd9Sstevel@tonic-gate 6177c478bd9Sstevel@tonic-gate r4inactive(rp, cr); 6187c478bd9Sstevel@tonic-gate 6197c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 6207c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 6217c478bd9Sstevel@tonic-gate vp->v_count--; 6227c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6237c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 6247c478bd9Sstevel@tonic-gate goto start; 6257c478bd9Sstevel@tonic-gate } 6267c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6277c478bd9Sstevel@tonic-gate vn_invalid(vp); 6287c478bd9Sstevel@tonic-gate 6297c478bd9Sstevel@tonic-gate /* 6307c478bd9Sstevel@tonic-gate * destroy old locks before bzero'ing and 6317c478bd9Sstevel@tonic-gate * recreating the locks below. 6327c478bd9Sstevel@tonic-gate */ 6337c478bd9Sstevel@tonic-gate uninit_rnode4(rp); 6347c478bd9Sstevel@tonic-gate 6357c478bd9Sstevel@tonic-gate /* 6367c478bd9Sstevel@tonic-gate * Make sure that if rnode is recycled then 6377c478bd9Sstevel@tonic-gate * VFS count is decremented properly before 6387c478bd9Sstevel@tonic-gate * reuse. 
6397c478bd9Sstevel@tonic-gate */ 6407c478bd9Sstevel@tonic-gate VFS_RELE(vp->v_vfsp); 6417c478bd9Sstevel@tonic-gate vn_reinit(vp); 6427c478bd9Sstevel@tonic-gate } else { 6437c478bd9Sstevel@tonic-gate vnode_t *new_vp; 6447c478bd9Sstevel@tonic-gate 6457c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 6467c478bd9Sstevel@tonic-gate 6477c478bd9Sstevel@tonic-gate rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP); 6487c478bd9Sstevel@tonic-gate new_vp = vn_alloc(KM_SLEEP); 6497c478bd9Sstevel@tonic-gate 650*1a5e258fSJosef 'Jeff' Sipek atomic_inc_ulong((ulong_t *)&rnode4_new); 6517c478bd9Sstevel@tonic-gate #ifdef DEBUG 6527c478bd9Sstevel@tonic-gate clstat4_debug.nrnode.value.ui64++; 6537c478bd9Sstevel@tonic-gate #endif 6547c478bd9Sstevel@tonic-gate vp = new_vp; 6557c478bd9Sstevel@tonic-gate } 6567c478bd9Sstevel@tonic-gate 6577c478bd9Sstevel@tonic-gate bzero(rp, sizeof (*rp)); 6587c478bd9Sstevel@tonic-gate rp->r_vnode = vp; 6597c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL); 6607c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL); 6617c478bd9Sstevel@tonic-gate mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL); 6627c478bd9Sstevel@tonic-gate mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL); 6637c478bd9Sstevel@tonic-gate mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL); 6647c478bd9Sstevel@tonic-gate mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL); 6657c478bd9Sstevel@tonic-gate rp->created_v4 = 0; 6667c478bd9Sstevel@tonic-gate list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t), 6677c478bd9Sstevel@tonic-gate offsetof(nfs4_open_stream_t, os_node)); 6687c478bd9Sstevel@tonic-gate rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head; 6697c478bd9Sstevel@tonic-gate rp->r_lo_head.lo_next_rnode = &rp->r_lo_head; 6707c478bd9Sstevel@tonic-gate cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL); 6717c478bd9Sstevel@tonic-gate cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL); 6727c478bd9Sstevel@tonic-gate 
rp->r_flags = R4READDIRWATTR; 6737c478bd9Sstevel@tonic-gate rp->r_fh = fh; 6747c478bd9Sstevel@tonic-gate rp->r_hashq = rhtp; 6757c478bd9Sstevel@tonic-gate sfh4_hold(rp->r_fh); 6767c478bd9Sstevel@tonic-gate rp->r_server = mi->mi_curr_serv; 6777c478bd9Sstevel@tonic-gate rp->r_deleg_type = OPEN_DELEGATE_NONE; 6787c478bd9Sstevel@tonic-gate rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE; 6797c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL); 6807c478bd9Sstevel@tonic-gate 6817c478bd9Sstevel@tonic-gate rddir4_cache_create(rp); 6827c478bd9Sstevel@tonic-gate rp->r_putapage = putapage; 6837c478bd9Sstevel@tonic-gate vn_setops(vp, vops); 6847c478bd9Sstevel@tonic-gate vp->v_data = (caddr_t)rp; 6857c478bd9Sstevel@tonic-gate vp->v_vfsp = vfsp; 6867c478bd9Sstevel@tonic-gate VFS_HOLD(vfsp); 6877c478bd9Sstevel@tonic-gate vp->v_type = VNON; 688f8bbc571SPavel Filipensky vp->v_flag |= VMODSORT; 6897c478bd9Sstevel@tonic-gate if (isrootfh(fh, rp)) 6907c478bd9Sstevel@tonic-gate vp->v_flag = VROOT; 6917c478bd9Sstevel@tonic-gate vn_exists(vp); 6927c478bd9Sstevel@tonic-gate 6937c478bd9Sstevel@tonic-gate /* 6947c478bd9Sstevel@tonic-gate * There is a race condition if someone else 6957c478bd9Sstevel@tonic-gate * alloc's the rnode while no locks are held, so we 6967c478bd9Sstevel@tonic-gate * check again and recover if found. 
6977c478bd9Sstevel@tonic-gate */ 6987c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_WRITER); 6997c478bd9Sstevel@tonic-gate if ((trp = r4find(rhtp, fh, vfsp)) != NULL) { 7007c478bd9Sstevel@tonic-gate vp = RTOV4(trp); 7017c478bd9Sstevel@tonic-gate *newnode = 0; 7027c478bd9Sstevel@tonic-gate rw_exit(&rhtp->r_lock); 7037c478bd9Sstevel@tonic-gate rp4_addfree(rp, cr); 7047c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 7057c478bd9Sstevel@tonic-gate return (vp); 7067c478bd9Sstevel@tonic-gate } 7077c478bd9Sstevel@tonic-gate rp4_addhash(rp); 7087c478bd9Sstevel@tonic-gate *newnode = 1; 7097c478bd9Sstevel@tonic-gate return (vp); 7107c478bd9Sstevel@tonic-gate } 7117c478bd9Sstevel@tonic-gate 7127c478bd9Sstevel@tonic-gate static void 7137c478bd9Sstevel@tonic-gate uninit_rnode4(rnode4_t *rp) 7147c478bd9Sstevel@tonic-gate { 7157c478bd9Sstevel@tonic-gate vnode_t *vp = RTOV4(rp); 7167c478bd9Sstevel@tonic-gate 7177c478bd9Sstevel@tonic-gate ASSERT(rp != NULL); 7187c478bd9Sstevel@tonic-gate ASSERT(vp != NULL); 7197c478bd9Sstevel@tonic-gate ASSERT(vp->v_count == 1); 7207c478bd9Sstevel@tonic-gate ASSERT(rp->r_count == 0); 7217c478bd9Sstevel@tonic-gate ASSERT(rp->r_mapcnt == 0); 7227c478bd9Sstevel@tonic-gate if (rp->r_flags & R4LODANGLERS) { 7237c478bd9Sstevel@tonic-gate nfs4_flush_lock_owners(rp); 7247c478bd9Sstevel@tonic-gate } 7257c478bd9Sstevel@tonic-gate ASSERT(rp->r_lo_head.lo_next_rnode == &rp->r_lo_head); 7267c478bd9Sstevel@tonic-gate ASSERT(rp->r_lo_head.lo_prev_rnode == &rp->r_lo_head); 7277c478bd9Sstevel@tonic-gate ASSERT(!(rp->r_flags & R4HASHED)); 7287c478bd9Sstevel@tonic-gate ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL); 7297c478bd9Sstevel@tonic-gate nfs4_clear_open_streams(rp); 7307c478bd9Sstevel@tonic-gate list_destroy(&rp->r_open_streams); 7317c478bd9Sstevel@tonic-gate 7327c478bd9Sstevel@tonic-gate /* 7337c478bd9Sstevel@tonic-gate * Destroy the rddir cache first since we need to grab the r_statelock. 
7347c478bd9Sstevel@tonic-gate */ 7357c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 7367c478bd9Sstevel@tonic-gate rddir4_cache_destroy(rp); 7377c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 7387c478bd9Sstevel@tonic-gate sv_uninit(&rp->r_svnode); 7397c478bd9Sstevel@tonic-gate sfh4_rele(&rp->r_fh); 7407c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_rwlock); 7417c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_lkserlock); 7427c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_statelock); 7437c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_statev4_lock); 7447c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_os_lock); 7457c478bd9Sstevel@tonic-gate cv_destroy(&rp->r_cv); 7467c478bd9Sstevel@tonic-gate cv_destroy(&rp->r_commit.c_cv); 7477c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_deleg_recall_lock); 7487c478bd9Sstevel@tonic-gate if (rp->r_flags & R4DELMAPLIST) 7497c478bd9Sstevel@tonic-gate list_destroy(&rp->r_indelmap); 7507c478bd9Sstevel@tonic-gate } 7517c478bd9Sstevel@tonic-gate 7527c478bd9Sstevel@tonic-gate /* 7537c478bd9Sstevel@tonic-gate * Put an rnode on the free list. 7547c478bd9Sstevel@tonic-gate * 7557c478bd9Sstevel@tonic-gate * Rnodes which were allocated above and beyond the normal limit 7567c478bd9Sstevel@tonic-gate * are immediately freed. 
 */
void
rp4_addfree(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	vnode_t *xattr;
	struct vfs *vfsp;

	vp = RTOV4(rp);
	ASSERT(vp->v_count >= 1);
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);

	/*
	 * If we have too many rnodes allocated and there are no
	 * references to this rnode, or if the rnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the rnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
#ifdef DEBUG
	    (nfs4_rnode_nofreelist != 0) ||
#endif
	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Another reference appeared; just drop
				 * ours and let the other holder dispose
				 * of the rnode.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				return;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		/*
		 * Make sure we don't have a delegation on this rnode
		 * before destroying it.
		 */
		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
			(void) nfs4delegreturn(rp,
			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		}

		r4inactive(rp, cr);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * rnode is not in the rnode hash queues; one
		 * way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the rnode was marked
		 * with R4DIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to r4inactive.  The i/o may have been completed,
		 * thus allowing r4inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the rnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the rnode or placing it on the
		 * rnode freelist.  If there are no other references,
		 * then the rnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		destroy_rnode4(rp);
		return;
	}

	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the rnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this rnode to the freelist.
	 */
again:
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&rp->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Make sure we don't put an rnode with a delegation
	 * on the free list.
	 */
	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
		rw_exit(&rp->r_hashq->r_lock);
		(void) nfs4delegreturn(rp,
		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		/* delegreturn dropped the bucket lock; re-validate */
		goto again;
	}

	/*
	 * Now that we have the hash queue lock, and we know there
	 * are not anymore references on the vnode, check to make
	 * sure there aren't any open streams still on the rnode.
	 * If so, drop the hash queue lock, remove the open streams,
	 * and recheck the v_count.
	 */
	mutex_enter(&rp->r_os_lock);
	if (list_head(&rp->r_open_streams) != NULL) {
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);
		/* nfs4close_all must run in the mount's zone */
		if (nfs_zone() != VTOMI4(vp)->mi_zone)
			nfs4_clear_open_streams(rp);
		else
			(void) nfs4close_all(vp, cr);
		goto again;
	}
	mutex_exit(&rp->r_os_lock);

	/*
	 * Before we put it on the freelist, make sure there are no pages.
	 * If there are, flush and commit of all of the dirty and
	 * uncommitted pages, assuming the file system isn't read only.
	 */
	if (!(vp->v_vfsp->vfs_flag & VFS_RDONLY) && nfs4_dross_pages(vp)) {
		rw_exit(&rp->r_hashq->r_lock);
		r4flushpages(rp, cr);
		goto again;
	}

	/*
	 * Before we put it on the freelist, make sure there is no
	 * active xattr directory cached, the freelist will not
	 * have its entries r4inactive'd if there is still an active
	 * rnode, thus nothing in the freelist can hold another
	 * rnode active.
	 */
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * If there is no cached data or metadata for this file, then
	 * put the rnode on the front of the freelist so that it will
	 * be reused before other rnodes which may have cached data or
	 * metadata associated with them.
	 */
	mutex_enter(&rp4freelist_lock);
	if (rp4freelist == NULL) {
		rp->r_freef = rp;
		rp->r_freeb = rp;
		rp4freelist = rp;
	} else {
		rp->r_freef = rp4freelist;
		rp->r_freeb = rp4freelist->r_freeb;
		rp4freelist->r_freeb->r_freef = rp;
		rp4freelist->r_freeb = rp;
		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
		    rp->r_symlink.contents == NULL && rp->r_secattr == NULL)
			rp4freelist = rp;
	}
	mutex_exit(&rp4freelist_lock);

	rw_exit(&rp->r_hashq->r_lock);

	/* release the cached xattr dir after all locks are dropped */
	if (xattr)
		VN_RELE(xattr);
}

/*
 * Remove an rnode from the free list.
 *
 * The caller must be holding rp4freelist_lock and the rnode
 * must be on the freelist.
 */
static void
rp4_rmfree(rnode4_t *rp)
{

	ASSERT(MUTEX_HELD(&rp4freelist_lock));
	ASSERT(rp->r_freef != NULL && rp->r_freeb != NULL);

	/* advance (or clear) the list head if rp is at the front */
	if (rp == rp4freelist) {
		rp4freelist = rp->r_freef;
		if (rp == rp4freelist)
			rp4freelist = NULL;
	}
	rp->r_freeb->r_freef = rp->r_freef;
	rp->r_freef->r_freeb = rp->r_freeb;

	rp->r_freef = rp->r_freeb = NULL;
}

/*
 * Put a rnode in the hash table.
9657c478bd9Sstevel@tonic-gate * 9667c478bd9Sstevel@tonic-gate * The caller must be holding the exclusive hash queue lock 9677c478bd9Sstevel@tonic-gate */ 9687c478bd9Sstevel@tonic-gate void 9697c478bd9Sstevel@tonic-gate rp4_addhash(rnode4_t *rp) 9707c478bd9Sstevel@tonic-gate { 9717c478bd9Sstevel@tonic-gate ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock)); 9727c478bd9Sstevel@tonic-gate ASSERT(!(rp->r_flags & R4HASHED)); 9737c478bd9Sstevel@tonic-gate 9747c478bd9Sstevel@tonic-gate #ifdef DEBUG 9757c478bd9Sstevel@tonic-gate r4_dup_check(rp, RTOV4(rp)->v_vfsp); 9767c478bd9Sstevel@tonic-gate #endif 9777c478bd9Sstevel@tonic-gate 9787c478bd9Sstevel@tonic-gate rp->r_hashf = rp->r_hashq->r_hashf; 9797c478bd9Sstevel@tonic-gate rp->r_hashq->r_hashf = rp; 9807c478bd9Sstevel@tonic-gate rp->r_hashb = (rnode4_t *)rp->r_hashq; 9817c478bd9Sstevel@tonic-gate rp->r_hashf->r_hashb = rp; 9827c478bd9Sstevel@tonic-gate 9837c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 9847c478bd9Sstevel@tonic-gate rp->r_flags |= R4HASHED; 9857c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 9867c478bd9Sstevel@tonic-gate } 9877c478bd9Sstevel@tonic-gate 9887c478bd9Sstevel@tonic-gate /* 9897c478bd9Sstevel@tonic-gate * Remove a rnode from the hash table. 9907c478bd9Sstevel@tonic-gate * 9917c478bd9Sstevel@tonic-gate * The caller must be holding the hash queue lock. 
9927c478bd9Sstevel@tonic-gate */ 9937c478bd9Sstevel@tonic-gate void 9947c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rnode4_t *rp) 9957c478bd9Sstevel@tonic-gate { 9967c478bd9Sstevel@tonic-gate ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock)); 9977c478bd9Sstevel@tonic-gate ASSERT(rp->r_flags & R4HASHED); 9987c478bd9Sstevel@tonic-gate 9997c478bd9Sstevel@tonic-gate rp->r_hashb->r_hashf = rp->r_hashf; 10007c478bd9Sstevel@tonic-gate rp->r_hashf->r_hashb = rp->r_hashb; 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 10037c478bd9Sstevel@tonic-gate rp->r_flags &= ~R4HASHED; 10047c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 10057c478bd9Sstevel@tonic-gate } 10067c478bd9Sstevel@tonic-gate 10077c478bd9Sstevel@tonic-gate /* 10087c478bd9Sstevel@tonic-gate * Remove a rnode from the hash table. 10097c478bd9Sstevel@tonic-gate * 10107c478bd9Sstevel@tonic-gate * The caller must not be holding the hash queue lock. 10117c478bd9Sstevel@tonic-gate */ 10127c478bd9Sstevel@tonic-gate void 10137c478bd9Sstevel@tonic-gate rp4_rmhash(rnode4_t *rp) 10147c478bd9Sstevel@tonic-gate { 10157c478bd9Sstevel@tonic-gate rw_enter(&rp->r_hashq->r_lock, RW_WRITER); 10167c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rp); 10177c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 10187c478bd9Sstevel@tonic-gate } 10197c478bd9Sstevel@tonic-gate 10207c478bd9Sstevel@tonic-gate /* 10217c478bd9Sstevel@tonic-gate * Lookup a rnode by fhandle. Ignores rnodes that had failed recovery. 10227c478bd9Sstevel@tonic-gate * Returns NULL if no match. If an rnode is returned, the reference count 10237c478bd9Sstevel@tonic-gate * on the master vnode is incremented. 10247c478bd9Sstevel@tonic-gate * 10257c478bd9Sstevel@tonic-gate * The caller must be holding the hash queue lock, either shared or exclusive. 
 */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	/* walk the bucket's chain; the bucket header terminates the list */
	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
		vp = RTOV4(rp);
		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {

			/* skip rnodes that failed recovery */
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4RECOVERR) {
				mutex_exit(&rp->r_statelock);
				continue;
			}
			mutex_exit(&rp->r_statelock);
#ifdef DEBUG
			r4_dup_check(rp, vfsp);
#endif
			if (rp->r_freef != NULL) {
				mutex_enter(&rp4freelist_lock);
				/*
				 * If the rnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 *
				 * (r_freef is rechecked under
				 * rp4freelist_lock because it may have
				 * changed since the unlocked test above.)
				 */
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
				} else {
					mutex_exit(&rp4freelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);

			/*
			 * if root vnode, set v_flag to indicate that
			 */
			if (isrootfh(fh, rp)) {
				if (!(vp->v_flag & VROOT)) {
					mutex_enter(&vp->v_lock);
					vp->v_flag |= VROOT;
					mutex_exit(&vp->v_lock);
				}
			}
			return (rp);
		}
	}
	return (NULL);
}

/*
 * Lookup an rnode by fhandle.  Just a wrapper for r4find()
 * that assumes the caller hasn't already got the lock
 * on the hash bucket.
 */
rnode4_t *
r4find_unlocked(nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	int index;

	index = rtable4hash(fh);
	rw_enter(&rtable4[index].r_lock, RW_READER);
	rp = r4find(&rtable4[index], fh, vfsp);
	rw_exit(&rtable4[index].r_lock);

	return (rp);
}

/*
 * Return >0 if there is an active vnode belonging to this vfs in the
 * rtable4 cache.
 *
 * Several of these checks are done without holding the usual
 * locks.  This is safe because destroy_rtable(), rp_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any rnodes.
 */
int
check_rtable4(struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;
	int busy = NFSV4_RTABLE4_OK;
	int index;

	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);

		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				/* classify why this rnode is still busy */
				if (rp->r_freef == NULL) {
					busy = NFSV4_RTABLE4_NOT_FREE_LIST;
				} else if (nfs4_has_pages(vp) &&
				    (rp->r_flags & R4DIRTY)) {
					busy = NFSV4_RTABLE4_DIRTY_PAGES;
				} else if (rp->r_count > 0) {
					busy = NFSV4_RTABLE4_POS_R_COUNT;
				}

				if (busy != NFSV4_RTABLE4_OK) {
#ifdef DEBUG
					char *path;

					/* fire a DTrace probe with the culprit's path */
					path = fn_path(rp->r_svnode.sv_name);
					DTRACE_NFSV4_3(rnode__e__debug,
					    int, busy, char *, path,
					    rnode4_t *, rp);
					kmem_free(path, strlen(path)+1);
#endif
					rw_exit(&rtable4[index].r_lock);
					return (busy);
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (busy);
}

/*
 * Destroy inactive vnodes from the hash queues which
 * belong to this vfs.  All of the vnodes should be inactive.
 * It is essential that we destroy all rnodes in case of
 * forced unmount as well as in normal unmount case.
 */

void
destroy_rtable4(struct vfs *vfsp, cred_t *cr)
{
	int index;
	vnode_t *vp;
	rnode4_t *rp, *r_hashf, *rlist;

	rlist = NULL;

	/*
	 * Pass 1: pull every freelisted rnode of this vfs off its hash
	 * chain, threading them onto a private list through r_hashf
	 * (safe to reuse: the rnode is no longer hashed).
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_WRITER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = r_hashf) {
			/* save the hash pointer before destroying */
			r_hashf = rp->r_hashf;

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				mutex_enter(&rp4freelist_lock);
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
					rp4_rmhash_locked(rp);
					rp->r_hashf = rlist;
					rlist = rp;
				} else
					mutex_exit(&rp4freelist_lock);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}

	/*
	 * Pass 2: dispose of the collected rnodes with no locks held.
	 */
	for (rp = rlist; rp != NULL; rp = r_hashf) {
		r_hashf = rp->r_hashf;
		/*
		 * This call to rp4_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, cr);
	}
}

/*
 * This routine destroys all the resources of an rnode
 * and finally the rnode itself.
 */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	/* caller must have already discarded/returned any delegation */
	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	atomic_dec_ulong((ulong_t *)&rnode4_new);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	vn_invalid(vp);
	vn_free(vp);
	VFS_RELE(vfsp);
}

/*
 * Invalidate the attributes on all rnodes forcing the next getattr
 * to go over the wire.  Used to flush stale uid and gid mappings.
 * Maybe done on a per vfsp, or all rnodes (vfsp == NULL)
 */
void
nfs4_rnode_invalidate(struct vfs *vfsp)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp;

	/*
	 * Walk the hash queues looking for rnodes.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			if (vfsp != NULL && vp->v_vfsp != vfsp)
				continue;

			/*
			 * Best effort: skip any rnode whose statelock is
			 * currently held rather than block while holding
			 * the hash bucket lock.
			 */
			if (!mutex_tryenter(&rp->r_statelock))
				continue;

			/*
			 * Expire the attributes by resetting the change
			 * and attr timeout.
			 */
			rp->r_change = 0;
			PURGE_ATTRCACHE4_LOCKED(rp);
			mutex_exit(&rp->r_statelock);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp, **vplist;
	long num, cnt;

	/*
	 * Check to see whether there is anything to do.
	 */
	num = rnode4_new;
	if (num == 0)
		return;

	/*
	 * Allocate a slot for all currently active rnodes on the
	 * supposition that they all may need flushing.
	 *
	 * Note that rnode4_new is only a snapshot; if more rnodes appear
	 * while we scan, the list may fill early and we bail out via
	 * the "toomany" label below.
	 */
	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
	cnt = 0;

	/*
	 * Walk the hash queues looking for rnodes with page
	 * lists associated with them.  Make a list of these
	 * files.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/*
			 * Don't bother sync'ing a vp if it
			 * is part of virtual swap device or
			 * if VFS is read-only
			 */
			if (IS_SWAPVP(vp) || vn_is_readonly(vp))
				continue;
			/*
			 * If flushing all mounted file systems or
			 * the vnode belongs to this vfs, has pages
			 * and is marked as either dirty or mmap'd,
			 * hold and add this vnode to the list of
			 * vnodes to flush.
			 */
			if ((vfsp == NULL || vp->v_vfsp == vfsp) &&
			    nfs4_has_pages(vp) &&
			    ((rp->r_flags & R4DIRTY) || rp->r_mapcnt > 0)) {
				VN_HOLD(vp);
				vplist[cnt++] = vp;
				if (cnt == num) {
					rw_exit(&rtable4[index].r_lock);
					goto toomany;
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
toomany:

	/*
	 * Flush and release all of the files on the list.
	 */
	while (cnt-- > 0) {
		vp = vplist[cnt];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL);
		VN_RELE(vp);
	}

	/*
	 * Free the space allocated to hold the list.
	 */
	kmem_free(vplist, num * sizeof (*vplist));
}

/*
 * Release the caches (readdir, symlink, ACL, access, xattr dir) held by
 * an rnode that is sitting on the free list.  May block on r_statelock.
 * Returns non-zero if anything was freed.
 */
int
nfs4_free_data_reclaim(rnode4_t *rp)
{
	char *contents;
	vnode_t *xattr;
	int size;
	vsecattr_t *vsp;
	int freed;
	bool_t rdc = FALSE;

	/*
	 * Free any held caches which may
	 * be associated with this rnode.
	 */
	mutex_enter(&rp->r_statelock);
	if (rp->r_dir != NULL)
		rdc = TRUE;
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	freed = nfs4_access_purge_rp(rp);

	if (rdc == FALSE && contents == NULL && vsp == NULL && xattr == NULL)
		return (freed);

	/*
	 * Free the readdir cache entries, incompletely if we can't block.
	 */
	nfs4_purge_rddir_cache(RTOV4(rp));

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/*
	 * Release the xattr directory vnode
	 */
	if (xattr != NULL)
		VN_RELE(xattr);

	return (1);
}

/*
 * As nfs4_free_data_reclaim(), but for an rnode that may be in active
 * use: never blocks (tryenter on r_statelock) and deliberately skips
 * the xattr dir in the deadlock-prone case described below.
 */
static int
nfs4_active_data_reclaim(rnode4_t *rp)
{
	char *contents;
	vnode_t *xattr = NULL;
	int size;
	vsecattr_t *vsp;
	int freed;
	bool_t rdc = FALSE;

	/*
	 * Free any held credentials and caches which
	 * may be associated with this rnode.
	 */
	if (!mutex_tryenter(&rp->r_statelock))
		return (0);
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	if (rp->r_dir != NULL)
		rdc = TRUE;
	/*
	 * To avoid a deadlock, do not free r_xattr_dir cache if it is hashed
	 * on the same r_hashq queue. We are not mandated to free all caches.
	 * VN_RELE(rp->r_xattr_dir) will be done sometime later - e.g. when the
	 * rnode 'rp' is freed or put on the free list.
	 *
	 * We will retain NFS4_XATTR_DIR_NOTSUPP because:
	 * - it has no associated rnode4_t (its v_data is NULL),
	 * - it is preallocated statically and will never go away,
	 *   so we cannot save anything by releasing it.
	 */
	if (rp->r_xattr_dir && rp->r_xattr_dir != NFS4_XATTR_DIR_NOTSUPP &&
	    VTOR4(rp->r_xattr_dir)->r_hashq != rp->r_hashq) {
		xattr = rp->r_xattr_dir;
		rp->r_xattr_dir = NULL;
	}
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	freed = nfs4_access_purge_rp(rp);

	if (contents == NULL && vsp == NULL && rdc == FALSE && xattr == NULL)
		return (freed);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	nfs4_purge_rddir_cache(RTOV4(rp));

	/*
	 * Release the xattr directory vnode
	 */
	if (xattr != NULL)
		VN_RELE(xattr);

	return (1);
}

/*
 * Reclaim cached data from every rnode on the free list.
 * Returns non-zero if anything was freed.
 */
static int
nfs4_free_reclaim(void)
{
	int freed;
	rnode4_t *rp;

#ifdef DEBUG
	clstat4_debug.f_reclaim.value.ui64++;
#endif
	freed = 0;
	mutex_enter(&rp4freelist_lock);
	rp = rp4freelist;
	if (rp != NULL) {
		/* circular list: walk until we come back to the head */
		do {
			if (nfs4_free_data_reclaim(rp))
				freed = 1;
		} while ((rp = rp->r_freef) != rp4freelist);
	}
	mutex_exit(&rp4freelist_lock);
	return (freed);
}

/*
 * Reclaim cached data from every rnode in the hash table (active or
 * not), without blocking.  Returns non-zero if anything was freed.
 */
static int
nfs4_active_reclaim(void)
{
	int freed;
	int index;
	rnode4_t *rp;

#ifdef DEBUG
	clstat4_debug.a_reclaim.value.ui64++;
#endif
	freed = 0;
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			if (nfs4_active_data_reclaim(rp))
				freed = 1;
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (freed);
}

/*
 * Last-resort reclaim: tear down rnodes on the free list entirely.
 *
 * NOTE(review): 'freed' is initialized to 0 and never set on any path,
 * so this function always returns 0 even when rnodes are destroyed --
 * presumably intentional (callers ignore it via (void)), but verify.
 */
static int
nfs4_rnode_reclaim(void)
{
	int freed;
	rnode4_t *rp;
	vnode_t *vp;

#ifdef DEBUG
	clstat4_debug.r_reclaim.value.ui64++;
#endif
	freed = 0;
	mutex_enter(&rp4freelist_lock);
	while ((rp = rp4freelist) != NULL) {
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);
		if (rp->r_flags & R4HASHED) {
			vp = RTOV4(rp);
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			/*
			 * Someone else re-activated the vnode while it sat
			 * on the free list: just drop our reference and
			 * leave it hashed.
			 */
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				mutex_enter(&rp4freelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}
		/*
		 * This call to rp_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, CRED());
		mutex_enter(&rp4freelist_lock);
	}
	mutex_exit(&rp4freelist_lock);
	return (freed);
}

/*
 * kmem cache reclaim callback (registered in nfs4_rnode_init): try the
 * cheapest reclaim first and escalate only if nothing was freed.
 */
/*ARGSUSED*/
static void
nfs4_reclaim(void *cdrarg)
{
#ifdef DEBUG
	clstat4_debug.reclaim.value.ui64++;
#endif
	if (nfs4_free_reclaim())
		return;

	if (nfs4_active_reclaim())
		return;

	(void) nfs4_rnode_reclaim();
}

/*
 * Returns the clientid4 to use for the given mntinfo4.  Note that the
 * clientid can change if the caller drops mi_recovlock.
15967c478bd9Sstevel@tonic-gate */ 15977c478bd9Sstevel@tonic-gate 15987c478bd9Sstevel@tonic-gate clientid4 15997c478bd9Sstevel@tonic-gate mi2clientid(mntinfo4_t *mi) 16007c478bd9Sstevel@tonic-gate { 16017c478bd9Sstevel@tonic-gate nfs4_server_t *sp; 16027c478bd9Sstevel@tonic-gate clientid4 clientid = 0; 16037c478bd9Sstevel@tonic-gate 16047c478bd9Sstevel@tonic-gate /* this locks down sp if it is found */ 16057c478bd9Sstevel@tonic-gate sp = find_nfs4_server(mi); 16067c478bd9Sstevel@tonic-gate if (sp != NULL) { 16077c478bd9Sstevel@tonic-gate clientid = sp->clientid; 16087c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 16097c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 16107c478bd9Sstevel@tonic-gate } 16117c478bd9Sstevel@tonic-gate return (clientid); 16127c478bd9Sstevel@tonic-gate } 16137c478bd9Sstevel@tonic-gate 16147c478bd9Sstevel@tonic-gate /* 16157c478bd9Sstevel@tonic-gate * Return the current lease time for the server associated with the given 16167c478bd9Sstevel@tonic-gate * file. Note that the lease time could change immediately after this 16177c478bd9Sstevel@tonic-gate * call. 
 */

time_t
r2lease_time(rnode4_t *rp)
{
	nfs4_server_t *sp;
	time_t lease_time;
	mntinfo4_t *mi = VTOMI4(RTOV4(rp));

	(void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0);

	/* this locks down sp if it is found */
	sp = find_nfs4_server(VTOMI4(RTOV4(rp)));

	if (VTOMI4(RTOV4(rp))->mi_vfsp->vfs_flag & VFS_UNMOUNTED) {
		if (sp != NULL) {
			mutex_exit(&sp->s_lock);
			nfs4_server_rele(sp);
		}
		nfs_rw_exit(&mi->mi_recovlock);
		return (1);	/* 1 second */
	}

	/* not unmounted, so there must be an associated nfs4_server_t */
	ASSERT(sp != NULL);

	lease_time = sp->s_lease_time;

	mutex_exit(&sp->s_lock);
	nfs4_server_rele(sp);
	nfs_rw_exit(&mi->mi_recovlock);

	return (lease_time);
}

/*
 * Return a list with information about all the known open instances for
 * a filesystem. The caller must call r4releopenlist() when done with the
 * list.
 *
 * We are safe at looking at os_valid and os_pending_close across dropping
 * the 'os_sync_lock' to count up the number of open streams and then
 * allocate memory for the osp list due to:
 *	-Looking at os_pending_close is safe since this routine is
 *	only called via recovery, and os_pending_close can only be set via
 *	a non-recovery operation (which are all blocked when recovery
 *	is active).
 *
 *	-Examining os_valid is safe since non-recovery operations, which
 *	could potentially switch os_valid to 0, are blocked (via
 *	nfs4_start_fop) and recovery is single-threaded per mntinfo4_t
 *	(which means we are the only recovery thread potentially acting
 *	on this open stream).
 */

nfs4_opinst_t *
r4mkopenlist(mntinfo4_t *mi)
{
	nfs4_opinst_t *reopenlist, *rep;
	rnode4_t *rp;
	vnode_t *vp;
	vfs_t *vfsp = mi->mi_vfsp;
	int numosp;
	nfs4_open_stream_t *osp;
	int index;
	open_delegation_type4 dtype;
	int hold_vnode;

	reopenlist = NULL;

	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp != vfsp)
				continue;
			hold_vnode = 0;

			mutex_enter(&rp->r_os_lock);

			/* Count the number of valid open_streams of the file */
			numosp = 0;
			for (osp = list_head(&rp->r_open_streams); osp != NULL;
			    osp = list_next(&rp->r_open_streams, osp)) {
				mutex_enter(&osp->os_sync_lock);
				if (osp->os_valid && !osp->os_pending_close)
					numosp++;
				mutex_exit(&osp->os_sync_lock);
			}

			/* Fill in the valid open streams per vp */
			if (numosp > 0) {
				int j;

				hold_vnode = 1;

				/*
				 * Add a new open instance to the list
				 */
				rep = kmem_zalloc(sizeof (*reopenlist),
				    KM_SLEEP);
				rep->re_next = reopenlist;
				reopenlist = rep;

				rep->re_vp = vp;
				rep->re_osp = kmem_zalloc(
				    numosp * sizeof (*(rep->re_osp)),
				    KM_SLEEP);
				rep->re_numosp = numosp;

				/*
				 * Second pass: take a reference on each
				 * valid stream and record it.
				 */
				j = 0;
				for (osp = list_head(&rp->r_open_streams);
				    osp != NULL;
				    osp = list_next(&rp->r_open_streams, osp)) {

					mutex_enter(&osp->os_sync_lock);
					if (osp->os_valid &&
					    !osp->os_pending_close) {
						osp->os_ref_count++;
						rep->re_osp[j] = osp;
						j++;
					}
					mutex_exit(&osp->os_sync_lock);
				}
				/*
				 * Assuming valid osp(s) stays valid between
				 * the time obtaining j and numosp.
				 */
				ASSERT(j == numosp);
			}

			mutex_exit(&rp->r_os_lock);
			/* do this here to keep v_lock > r_os_lock */
			if (hold_vnode)
				VN_HOLD(vp);
			mutex_enter(&rp->r_statev4_lock);
			if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
				/*
				 * If this rnode holds a delegation,
				 * but if there are no valid open streams,
				 * then just discard the delegation
				 * without doing delegreturn.
				 */
				if (numosp > 0)
					rp->r_deleg_needs_recovery =
					    rp->r_deleg_type;
			}
			/* Save the delegation type for use outside the lock */
			dtype = rp->r_deleg_type;
			mutex_exit(&rp->r_statev4_lock);

			/*
			 * If we have a delegation then get rid of it.
			 * We've set rp->r_deleg_needs_recovery so we have
			 * enough information to recover.
			 */
			if (dtype != OPEN_DELEGATE_NONE) {
				(void) nfs4delegreturn(rp, NFS4_DR_DISCARD);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (reopenlist);
}

/*
 * Given a filesystem id, check to see if any rnodes
 * within this fsid reside in the rnode cache, other
 * than one we know about.
 *
 * Return 1 if an rnode is found, 0 otherwise
 */
int
r4find_by_fsid(mntinfo4_t *mi, fattr4_fsid *moved_fsid)
{
	rnode4_t *rp;
	vnode_t *vp;
	vfs_t *vfsp = mi->mi_vfsp;
	fattr4_fsid *fsid;
	int index, found = 0;

	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp != vfsp)
				continue;

			/*
			 * XXX there might be a case where a
			 * replicated fs may have the same fsid
			 * across two different servers.  This
			 * check isn't good enough in that case
			 */
			fsid = &rp->r_srv_fsid;
			if (FATTR4_FSID_EQ(moved_fsid, fsid)) {
				found = 1;
				break;
			}
		}
		rw_exit(&rtable4[index].r_lock);

		if (found)
			break;
	}
	return (found);
}

/*
 * Release the list of open instance references.
 *
 * Drops the stream references and vnode holds taken by r4mkopenlist()
 * and frees the list itself.
 */

void
r4releopenlist(nfs4_opinst_t *reopenp)
{
	nfs4_opinst_t *rep, *next;
	int i;

	for (rep = reopenp; rep; rep = next) {
		next = rep->re_next;

		for (i = 0; i < rep->re_numosp; i++)
			open_stream_rele(rep->re_osp[i], VTOR4(rep->re_vp));

		VN_RELE(rep->re_vp);
		kmem_free(rep->re_osp,
		    rep->re_numosp * sizeof (*(rep->re_osp)));

		kmem_free(rep, sizeof (*rep));
	}
}

/*
 * Module initialization: size and allocate the rnode4 hash table and
 * create the rnode kmem cache (with nfs4_reclaim as its reclaim hook).
 * Always returns 0.
 */
int
nfs4_rnode_init(void)
{
	ulong_t nrnode4_max;
	int i;

	/*
	 * Compute the size of the rnode4 hash table
	 */
	if (nrnode <= 0)
		nrnode = ncsize;
	/* cap at what a quarter of available kmem could hold */
	nrnode4_max =
	    (ulong_t)((kmem_maxavail() >> 2) / sizeof (struct rnode4));
	if (nrnode > nrnode4_max || (nrnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "!setting nrnode to max value of %ld", nrnode4_max);
		nrnode = nrnode4_max;
	}
	/* round to a power of two so rtable4mask works as a hash mask */
	rtable4size = 1 << highbit(nrnode / rnode4_hashlen);
	rtable4mask = rtable4size - 1;

	/*
	 * Allocate and initialize the hash buckets
	 */
	rtable4 = kmem_alloc(rtable4size * sizeof (*rtable4), KM_SLEEP);
	for (i = 0; i < rtable4size; i++) {
		/* empty bucket: head points at itself (circular list) */
		rtable4[i].r_hashf = (rnode4_t *)(&rtable4[i]);
		rtable4[i].r_hashb = (rnode4_t *)(&rtable4[i]);
		rw_init(&rtable4[i].r_lock, NULL, RW_DEFAULT, NULL);
	}

	rnode4_cache = kmem_cache_create("rnode4_cache", sizeof (rnode4_t),
	    0, NULL, NULL, nfs4_reclaim, NULL, NULL, 0);

	return (0);
}

/*
 * Module teardown: destroy the rnode cache and the hash table created
 * by nfs4_rnode_init().  Always returns 0.
 */
int
nfs4_rnode_fini(void)
{
	int i;

	/*
	 * Deallocate the rnode hash queues
	 */
	kmem_cache_destroy(rnode4_cache);

	for (i = 0; i < rtable4size; i++)
		rw_destroy(&rtable4[i].r_lock);

	kmem_free(rtable4, rtable4size * sizeof (*rtable4));

	return (0);
}

/*
 * Return non-zero if the given filehandle refers to the root filehandle
 * for the given rnode.
 */

static int
isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp)
{
	int isroot;

	isroot = 0;
	if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh))
		isroot = 1;

	return (isroot);
}

/*
 * The r4_stub_* routines assume that the rnode is newly activated, and
 * that the caller either holds the hash bucket r_lock for this rnode as
 * RW_WRITER, or holds r_statelock.
1931b9238976Sth */ 1932b9238976Sth static void 1933b9238976Sth r4_stub_set(rnode4_t *rp, nfs4_stub_type_t type) 1934b9238976Sth { 1935b9238976Sth vnode_t *vp = RTOV4(rp); 1936b9238976Sth krwlock_t *hash_lock = &rp->r_hashq->r_lock; 1937b9238976Sth 1938b9238976Sth ASSERT(RW_WRITE_HELD(hash_lock) || MUTEX_HELD(&rp->r_statelock)); 1939b9238976Sth 1940b9238976Sth rp->r_stub_type = type; 1941b9238976Sth 1942b9238976Sth /* 1943b9238976Sth * Safely switch this vnode to the trigger vnodeops. 1944b9238976Sth * 1945b9238976Sth * Currently, we don't ever switch a trigger vnode back to using 1946b9238976Sth * "regular" v4 vnodeops. NFS4_STUB_NONE is only used to note that 1947b9238976Sth * a new v4 object is not a trigger, and it will already have the 1948b9238976Sth * correct v4 vnodeops by default. So, no "else" case required here. 1949b9238976Sth */ 1950b9238976Sth if (type != NFS4_STUB_NONE) 1951b9238976Sth vn_setops(vp, nfs4_trigger_vnodeops); 1952b9238976Sth } 1953b9238976Sth 1954b9238976Sth void 1955b9238976Sth r4_stub_mirrormount(rnode4_t *rp) 1956b9238976Sth { 1957b9238976Sth r4_stub_set(rp, NFS4_STUB_MIRRORMOUNT); 1958b9238976Sth } 1959b9238976Sth 19602f172c55SRobert Thurlow void 19612f172c55SRobert Thurlow r4_stub_referral(rnode4_t *rp) 19622f172c55SRobert Thurlow { 19632f172c55SRobert Thurlow DTRACE_PROBE1(nfs4clnt__func__referral__moved, 19642f172c55SRobert Thurlow vnode_t *, RTOV4(rp)); 19652f172c55SRobert Thurlow r4_stub_set(rp, NFS4_STUB_REFERRAL); 19662f172c55SRobert Thurlow } 19672f172c55SRobert Thurlow 1968b9238976Sth void 1969b9238976Sth r4_stub_none(rnode4_t *rp) 1970b9238976Sth { 1971b9238976Sth r4_stub_set(rp, NFS4_STUB_NONE); 1972b9238976Sth } 1973b9238976Sth 19747c478bd9Sstevel@tonic-gate #ifdef DEBUG 19757c478bd9Sstevel@tonic-gate 19767c478bd9Sstevel@tonic-gate /* 19777c478bd9Sstevel@tonic-gate * Look in the rnode table for other rnodes that have the same filehandle. 
19787c478bd9Sstevel@tonic-gate * Assume the lock is held for the hash chain of checkrp 19797c478bd9Sstevel@tonic-gate */ 19807c478bd9Sstevel@tonic-gate 19817c478bd9Sstevel@tonic-gate static void 19827c478bd9Sstevel@tonic-gate r4_dup_check(rnode4_t *checkrp, vfs_t *vfsp) 19837c478bd9Sstevel@tonic-gate { 19847c478bd9Sstevel@tonic-gate rnode4_t *rp; 19857c478bd9Sstevel@tonic-gate vnode_t *tvp; 19867c478bd9Sstevel@tonic-gate nfs4_fhandle_t fh, fh2; 19877c478bd9Sstevel@tonic-gate int index; 19887c478bd9Sstevel@tonic-gate 19897c478bd9Sstevel@tonic-gate if (!r4_check_for_dups) 19907c478bd9Sstevel@tonic-gate return; 19917c478bd9Sstevel@tonic-gate 19927c478bd9Sstevel@tonic-gate ASSERT(RW_LOCK_HELD(&checkrp->r_hashq->r_lock)); 19937c478bd9Sstevel@tonic-gate 19947c478bd9Sstevel@tonic-gate sfh4_copyval(checkrp->r_fh, &fh); 19957c478bd9Sstevel@tonic-gate 19967c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 19977c478bd9Sstevel@tonic-gate 19987c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 19997c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 20007c478bd9Sstevel@tonic-gate 20017c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 20027c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 20037c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 20047c478bd9Sstevel@tonic-gate 20057c478bd9Sstevel@tonic-gate if (rp == checkrp) 20067c478bd9Sstevel@tonic-gate continue; 20077c478bd9Sstevel@tonic-gate 20087c478bd9Sstevel@tonic-gate tvp = RTOV4(rp); 20097c478bd9Sstevel@tonic-gate if (tvp->v_vfsp != vfsp) 20107c478bd9Sstevel@tonic-gate continue; 20117c478bd9Sstevel@tonic-gate 20127c478bd9Sstevel@tonic-gate sfh4_copyval(rp->r_fh, &fh2); 20137c478bd9Sstevel@tonic-gate if (nfs4cmpfhandle(&fh, &fh2) == 0) { 20147c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "rnodes with same fs, fh " 20157c478bd9Sstevel@tonic-gate "(%p, %p)", (void *)checkrp, (void *)rp); 20167c478bd9Sstevel@tonic-gate } 20177c478bd9Sstevel@tonic-gate 
} 20187c478bd9Sstevel@tonic-gate 20197c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 20207c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 20217c478bd9Sstevel@tonic-gate } 20227c478bd9Sstevel@tonic-gate } 20237c478bd9Sstevel@tonic-gate 20247c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 2025