17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 59f9e2373Sjwahlig * Common Development and Distribution License (the "License"). 69f9e2373Sjwahlig * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 224151f947SPavel Filipensky * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate /* 277c478bd9Sstevel@tonic-gate * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 
287c478bd9Sstevel@tonic-gate * All Rights Reserved 297c478bd9Sstevel@tonic-gate */ 307c478bd9Sstevel@tonic-gate 317c478bd9Sstevel@tonic-gate 327c478bd9Sstevel@tonic-gate #include <sys/param.h> 337c478bd9Sstevel@tonic-gate #include <sys/types.h> 347c478bd9Sstevel@tonic-gate #include <sys/systm.h> 357c478bd9Sstevel@tonic-gate #include <sys/cred.h> 367c478bd9Sstevel@tonic-gate #include <sys/proc.h> 377c478bd9Sstevel@tonic-gate #include <sys/user.h> 387c478bd9Sstevel@tonic-gate #include <sys/time.h> 397c478bd9Sstevel@tonic-gate #include <sys/buf.h> 407c478bd9Sstevel@tonic-gate #include <sys/vfs.h> 417c478bd9Sstevel@tonic-gate #include <sys/vnode.h> 427c478bd9Sstevel@tonic-gate #include <sys/socket.h> 437c478bd9Sstevel@tonic-gate #include <sys/uio.h> 447c478bd9Sstevel@tonic-gate #include <sys/tiuser.h> 457c478bd9Sstevel@tonic-gate #include <sys/swap.h> 467c478bd9Sstevel@tonic-gate #include <sys/errno.h> 477c478bd9Sstevel@tonic-gate #include <sys/debug.h> 487c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 497c478bd9Sstevel@tonic-gate #include <sys/kstat.h> 507c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 517c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 527c478bd9Sstevel@tonic-gate #include <sys/session.h> 537c478bd9Sstevel@tonic-gate #include <sys/dnlc.h> 547c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 557c478bd9Sstevel@tonic-gate #include <sys/acl.h> 567c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 577c478bd9Sstevel@tonic-gate #include <sys/pathname.h> 587c478bd9Sstevel@tonic-gate #include <sys/flock.h> 597c478bd9Sstevel@tonic-gate #include <sys/dirent.h> 607c478bd9Sstevel@tonic-gate #include <sys/flock.h> 617c478bd9Sstevel@tonic-gate #include <sys/callb.h> 62*6962f5b8SThomas Haynes #include <sys/sdt.h> 637c478bd9Sstevel@tonic-gate 647c478bd9Sstevel@tonic-gate #include <rpc/types.h> 657c478bd9Sstevel@tonic-gate #include <rpc/xdr.h> 667c478bd9Sstevel@tonic-gate #include <rpc/auth.h> 677c478bd9Sstevel@tonic-gate #include <rpc/rpcsec_gss.h> 
687c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 697c478bd9Sstevel@tonic-gate 707c478bd9Sstevel@tonic-gate #include <nfs/nfs.h> 717c478bd9Sstevel@tonic-gate #include <nfs/nfs_clnt.h> 727c478bd9Sstevel@tonic-gate #include <nfs/nfs_acl.h> 737c478bd9Sstevel@tonic-gate 747c478bd9Sstevel@tonic-gate #include <nfs/nfs4.h> 757c478bd9Sstevel@tonic-gate #include <nfs/rnode4.h> 767c478bd9Sstevel@tonic-gate #include <nfs/nfs4_clnt.h> 777c478bd9Sstevel@tonic-gate 787c478bd9Sstevel@tonic-gate /* 797c478bd9Sstevel@tonic-gate * The hash queues for the access to active and cached rnodes 807c478bd9Sstevel@tonic-gate * are organized as doubly linked lists. A reader/writer lock 817c478bd9Sstevel@tonic-gate * for each hash bucket is used to control access and to synchronize 827c478bd9Sstevel@tonic-gate * lookups, additions, and deletions from the hash queue. 837c478bd9Sstevel@tonic-gate * 847c478bd9Sstevel@tonic-gate * The rnode freelist is organized as a doubly linked list with 857c478bd9Sstevel@tonic-gate * a head pointer. Additions and deletions are synchronized via 867c478bd9Sstevel@tonic-gate * a single mutex. 877c478bd9Sstevel@tonic-gate * 887c478bd9Sstevel@tonic-gate * In order to add an rnode to the free list, it must be hashed into 897c478bd9Sstevel@tonic-gate * a hash queue and the exclusive lock to the hash queue be held. 907c478bd9Sstevel@tonic-gate * If an rnode is not hashed into a hash queue, then it is destroyed 917c478bd9Sstevel@tonic-gate * because it represents no valuable information that can be reused 927c478bd9Sstevel@tonic-gate * about the file. The exclusive lock to the hash queue must be 937c478bd9Sstevel@tonic-gate * held in order to prevent a lookup in the hash queue from finding 947c478bd9Sstevel@tonic-gate * the rnode and using it and assuming that the rnode is not on the 957c478bd9Sstevel@tonic-gate * freelist. The lookup in the hash queue will have the hash queue 967c478bd9Sstevel@tonic-gate * locked, either exclusive or shared. 
977c478bd9Sstevel@tonic-gate * 987c478bd9Sstevel@tonic-gate * The vnode reference count for each rnode is not allowed to drop 997c478bd9Sstevel@tonic-gate * below 1. This prevents external entities, such as the VM 1007c478bd9Sstevel@tonic-gate * subsystem, from acquiring references to vnodes already on the 1017c478bd9Sstevel@tonic-gate * freelist and then trying to place them back on the freelist 1027c478bd9Sstevel@tonic-gate * when their reference is released. This means that the when an 1037c478bd9Sstevel@tonic-gate * rnode is looked up in the hash queues, then either the rnode 104da6c28aaSamw * is removed from the freelist and that reference is transferred to 1057c478bd9Sstevel@tonic-gate * the new reference or the vnode reference count must be incremented 1067c478bd9Sstevel@tonic-gate * accordingly. The mutex for the freelist must be held in order to 1077c478bd9Sstevel@tonic-gate * accurately test to see if the rnode is on the freelist or not. 1087c478bd9Sstevel@tonic-gate * The hash queue lock might be held shared and it is possible that 1097c478bd9Sstevel@tonic-gate * two different threads may race to remove the rnode from the 1107c478bd9Sstevel@tonic-gate * freelist. This race can be resolved by holding the mutex for the 1117c478bd9Sstevel@tonic-gate * freelist. Please note that the mutex for the freelist does not 1127c478bd9Sstevel@tonic-gate * need to be held if the rnode is not on the freelist. It can not be 1137c478bd9Sstevel@tonic-gate * placed on the freelist due to the requirement that the thread 1147c478bd9Sstevel@tonic-gate * putting the rnode on the freelist must hold the exclusive lock 1157c478bd9Sstevel@tonic-gate * to the hash queue and the thread doing the lookup in the hash 1167c478bd9Sstevel@tonic-gate * queue is holding either a shared or exclusive lock to the hash 1177c478bd9Sstevel@tonic-gate * queue. 
1187c478bd9Sstevel@tonic-gate * 1197c478bd9Sstevel@tonic-gate * The lock ordering is: 1207c478bd9Sstevel@tonic-gate * 1217c478bd9Sstevel@tonic-gate * hash bucket lock -> vnode lock 1222d1fef97Ssamf * hash bucket lock -> freelist lock -> r_statelock 1237c478bd9Sstevel@tonic-gate */ 1247c478bd9Sstevel@tonic-gate r4hashq_t *rtable4; 1257c478bd9Sstevel@tonic-gate 1267c478bd9Sstevel@tonic-gate static kmutex_t rp4freelist_lock; 1277c478bd9Sstevel@tonic-gate static rnode4_t *rp4freelist = NULL; 1287c478bd9Sstevel@tonic-gate static long rnode4_new = 0; 1297c478bd9Sstevel@tonic-gate int rtable4size; 1307c478bd9Sstevel@tonic-gate static int rtable4mask; 1317c478bd9Sstevel@tonic-gate static struct kmem_cache *rnode4_cache; 1327c478bd9Sstevel@tonic-gate static int rnode4_hashlen = 4; 1337c478bd9Sstevel@tonic-gate 1347c478bd9Sstevel@tonic-gate static void r4inactive(rnode4_t *, cred_t *); 1357c478bd9Sstevel@tonic-gate static vnode_t *make_rnode4(nfs4_sharedfh_t *, r4hashq_t *, struct vfs *, 1367c478bd9Sstevel@tonic-gate struct vnodeops *, 1377c478bd9Sstevel@tonic-gate int (*)(vnode_t *, page_t *, u_offset_t *, size_t *, int, 1387c478bd9Sstevel@tonic-gate cred_t *), 1397c478bd9Sstevel@tonic-gate int *, cred_t *); 1407c478bd9Sstevel@tonic-gate static void rp4_rmfree(rnode4_t *); 1417c478bd9Sstevel@tonic-gate int nfs4_free_data_reclaim(rnode4_t *); 1427c478bd9Sstevel@tonic-gate static int nfs4_active_data_reclaim(rnode4_t *); 1437c478bd9Sstevel@tonic-gate static int nfs4_free_reclaim(void); 1447c478bd9Sstevel@tonic-gate static int nfs4_active_reclaim(void); 1457c478bd9Sstevel@tonic-gate static int nfs4_rnode_reclaim(void); 1467c478bd9Sstevel@tonic-gate static void nfs4_reclaim(void *); 1477c478bd9Sstevel@tonic-gate static int isrootfh(nfs4_sharedfh_t *, rnode4_t *); 1487c478bd9Sstevel@tonic-gate static void uninit_rnode4(rnode4_t *); 1497c478bd9Sstevel@tonic-gate static void destroy_rnode4(rnode4_t *); 150b9238976Sth static void r4_stub_set(rnode4_t *, nfs4_stub_type_t); 
#ifdef DEBUG
static int r4_check_for_dups = 0; /* Flag to enable dup rnode detection. */
static int nfs4_rnode_debug = 0;
/* if nonzero, kmem_cache_free() rnodes rather than place on freelist */
static int nfs4_rnode_nofreelist = 0;
/* give messages on colliding shared filehandles */
static void r4_dup_check(rnode4_t *, vfs_t *);
#endif

/*
 * If the vnode has pages, run the list and check for any that are
 * still dangling.  We call this routine before putting an rnode on
 * the free list.
 *
 * Returns 1 if at least one page on vp's page list has p_fsdata set
 * to something other than C_NOCOMMIT (i.e. the page still needs a
 * commit), 0 otherwise.  The walk is done under the vnode's page
 * mutex; v_pages is a circular list, hence the do/while termination
 * test against the list head.
 */
static int
nfs4_dross_pages(vnode_t *vp)
{
	page_t *pp;
	kmutex_t *vphm;

	vphm = page_vnode_mutex(vp);
	mutex_enter(vphm);
	if ((pp = vp->v_pages) != NULL) {
		do {
			if (pp->p_fsdata != C_NOCOMMIT) {
				/* found a dangling page; drop lock and bail */
				mutex_exit(vphm);
				return (1);
			}
		} while ((pp = pp->p_vpnext) != vp->v_pages);
	}
	mutex_exit(vphm);

	return (0);
}
/*
 * Flush any pages left on this rnode.
 *
 * First waits (on r_cv) for r_count to drain to zero so that all
 * outstanding async read-ahead/write-behind i/o on the rnode has
 * completed, then pushes dirty pages to the server and invalidates
 * the page cache for the vnode.  ENOSPC/EDQUOT errors from the
 * putpage are latched into rp->r_error (first error wins) so a later
 * close can report them.
 */
static void
r4flushpages(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	int error;

	/*
	 * Before freeing anything, wait until all asynchronous
	 * activity is done on this rnode.  This will allow all
	 * asynchronous read ahead and write behind i/o's to
	 * finish.
	 */
	mutex_enter(&rp->r_statelock);
	while (rp->r_count > 0)
		cv_wait(&rp->r_cv, &rp->r_statelock);
	mutex_exit(&rp->r_statelock);

	/*
	 * Flush and invalidate all pages associated with the vnode.
	 */
	vp = RTOV4(rp);
	if (nfs4_has_pages(vp)) {
		ASSERT(vp->v_type != VCHR);
		if ((rp->r_flags & R4DIRTY) && !rp->r_error) {
			error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr, NULL);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				/* record only the first such error */
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
		}
		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
	}
}
/*
 * Free the resources associated with an rnode.
 *
 * Flushes/invalidates pages, then detaches and frees the cached
 * symlink contents, cached ACL, cached xattr directory vnode, the
 * access cache and the readdir cache.  The caches are detached from
 * the rnode under r_statelock and freed after the lock is dropped,
 * so the expensive frees happen without the lock held.
 */
static void
r4inactive(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	char *contents;
	int size;
	vsecattr_t *vsp;
	vnode_t *xattr;

	r4flushpages(rp, cr);

	vp = RTOV4(rp);

	/*
	 * Free any held caches which may be
	 * associated with this rnode.
	 */
	mutex_enter(&rp->r_statelock);
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	(void) nfs4_access_purge_rp(rp);

	/*
	 * Free the readdir cache entries.
	 */
	nfs4_purge_rddir_cache(vp);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {

		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/*
	 * Release the cached xattr_dir
	 */
	if (xattr != NULL)
		VN_RELE(xattr);
}

/*
 * We have seen a case that the fh passed in is for "." which
 * should be a VROOT node, however, the fh is different from the
 * root fh stored in the mntinfo4_t.  The invalid fh might be
 * from a misbehaved server and will panic the client system at
 * a later time.  To avoid the panic, we drop the bad fh, use
 * the root fh from mntinfo4_t, and print an error message
 * for attention.
 *
 * Returns the filehandle to use (either the caller's fh or
 * mi->mi_rootfh) and sets *wasbad to 1 iff the substitution was
 * made.  When substituting, the caller is still responsible for
 * releasing the original fh.
 */
nfs4_sharedfh_t *
badrootfh_check(nfs4_sharedfh_t *fh, nfs4_fname_t *nm, mntinfo4_t *mi,
    int *wasbad)
{
	char *s;

	*wasbad = 0;
	s = fn_name(nm);
	ASSERT(strcmp(s, "..") != 0);

	/* only a fh for "." that differs from the saved root fh is bad */
	if ((s[0] == '.' && s[1] == '\0') && fh &&
	    !SFH4_SAME(mi->mi_rootfh, fh)) {
#ifdef DEBUG
		nfs4_fhandle_t fhandle;

		zcmn_err(mi->mi_zone->zone_id, CE_WARN,
		    "Server %s returns a different "
		    "root filehandle for the path %s:",
		    mi->mi_curr_serv->sv_hostname,
		    mi->mi_curr_serv->sv_path);

		/* print the bad fh */
		fhandle.fh_len = fh->sfh_fh.nfs_fh4_len;
		bcopy(fh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);

		/* print mi_rootfh */
		fhandle.fh_len = mi->mi_rootfh->sfh_fh.nfs_fh4_len;
		bcopy(mi->mi_rootfh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);
#endif
		/* use mi_rootfh instead; fh will be rele by the caller */
		fh = mi->mi_rootfh;
		*wasbad = 1;
	}

	kmem_free(s, MAXNAMELEN);
	return (fh);
}

/*
 * Cache the attributes in garp on the vnode just found/created by
 * make_rnode4() (newnode is non-zero iff the rnode was newly created).
 *
 * The caller must hold rtable4[index].r_lock; this function drops that
 * lock on every path (the callers ASSERT that the lock is no longer
 * owned afterwards).  For a brand new node it also sets v_type/v_rdev
 * and decides whether the node is a "stub" (mirror-mount crossing).
 */
void
r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
    hrtime_t t, cred_t *cr, int index)
{
	int is_stub;
	vattr_t *attr;
	/*
	 * Don't add to attrcache if time overflow, but
	 * no need to check because either attr is null or the time
	 * values in it were processed by nfs4_time_ntov(), which checks
	 * for time overflows.
	 */
	attr = garp ? &garp->n4g_va : NULL;

	if (attr) {
		if (!newnode) {
			/* existing node: drop bucket lock before caching */
			rw_exit(&rtable4[index].r_lock);
#ifdef DEBUG
			if (vp->v_type != attr->va_type &&
			    vp->v_type != VNON && attr->va_type != VNON) {
				zcmn_err(VTOMI4(vp)->mi_zone->zone_id, CE_WARN,
				    "makenfs4node: type (%d) doesn't "
				    "match type of found node at %p (%d)",
				    attr->va_type, (void *)vp, vp->v_type);
			}
#endif
			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
		} else {
			rnode4_t *rp = VTOR4(vp);

			vp->v_type = attr->va_type;
			vp->v_rdev = attr->va_rdev;

			/*
			 * Turn this object into a "stub" object if we
			 * crossed an underlying server fs boundary.
			 * To make this check, during mount we save the
			 * fsid of the server object being mounted.
			 * Here we compare this object's server fsid
			 * with the fsid we saved at mount.  If they
			 * are different, we crossed server fs boundary.
			 *
			 * The stub type is set (or not) at rnode
			 * creation time and it never changes for life
			 * of the rnode.
			 *
			 * The stub type is also set during RO failover,
			 * nfs4_remap_file().
			 *
			 * This stub will be for a mirror-mount.
			 *
			 * We don't bother with taking r_state_lock to
			 * set the stub type because this is a new rnode
			 * and we're holding the hash bucket r_lock RW_WRITER.
			 * No other thread could have obtained access
			 * to this rnode.
			 */
			is_stub = 0;
			if (garp->n4g_fsid_valid) {
				fattr4_fsid ga_fsid = garp->n4g_fsid;
				servinfo4_t *svp = rp->r_server;

				rp->r_srv_fsid = ga_fsid;

				(void) nfs_rw_enter_sig(&svp->sv_lock,
				    RW_READER, 0);
				if (!FATTR4_FSID_EQ(&ga_fsid, &svp->sv_fsid))
					is_stub = 1;
				nfs_rw_exit(&svp->sv_lock);
			}

			if (is_stub)
				r4_stub_mirrormount(rp);
			else
				r4_stub_none(rp);

			/* Can not cache partial attr */
			if (attr->va_mask == AT_ALL)
				nfs4_attrcache_noinval(vp, garp, t);
			else
				PURGE_ATTRCACHE4(vp);

			rw_exit(&rtable4[index].r_lock);
		}
	} else {
		/* no attrs supplied: purge a new node's cache, drop lock */
		if (newnode) {
			PURGE_ATTRCACHE4(vp);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Find or create an rnode based primarily on filehandle.  To be
 * used when dvp (vnode for parent directory) is not available;
 * otherwise, makenfs4node() should be used.
 *
 * The nfs4_fname_t argument *npp is consumed and nulled out.
 */

vnode_t *
makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
    nfs4_fname_t **npp, nfs4_ga_res_t *garp,
    mntinfo4_t *mi, cred_t *cr, hrtime_t t)
{
	vfs_t *vfsp = mi->mi_vfsp;
	int newnode = 0;
	vnode_t *vp;
	rnode4_t *rp;
	svnode_t *svp;
	nfs4_fname_t *name, *svpname;
	int index;

	ASSERT(npp && *npp);
	name = *npp;
	*npp = NULL;

	index = rtable4hash(sfh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	vp = make_rnode4(sfh, &rtable4[index], vfsp,
	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);

	svp = VTOSV(vp);
	rp = VTOR4(vp);
	if (newnode) {
		/* fresh shadow vnode: self-linked list, takes over name */
		svp->sv_forw = svp->sv_back = svp;
		svp->sv_name = name;
		if (psfh != NULL)
			sfh4_hold(psfh);
		svp->sv_dfh = psfh;
	} else {
		/*
		 * It is possible that due to a server
		 * side rename fnames have changed.
		 * update the fname here.
		 */
		mutex_enter(&rp->r_svlock);
		svpname = svp->sv_name;
		if (svp->sv_name != name) {
			svp->sv_name = name;
			mutex_exit(&rp->r_svlock);
			fn_rele(&svpname);
		} else {
			mutex_exit(&rp->r_svlock);
			fn_rele(&name);
		}
	}

	/* r4_do_attrcache() drops the hash bucket lock */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}

/*
 * Find or create a vnode for the given filehandle, filesystem, parent, and
 * name.  The reference to nm is consumed, so the caller must first do an
 * fn_hold() if it wants to continue using nm after this call.
 */
vnode_t *
makenfs4node(nfs4_sharedfh_t *fh, nfs4_ga_res_t *garp, struct vfs *vfsp,
    hrtime_t t, cred_t *cr, vnode_t *dvp, nfs4_fname_t *nm)
{
	vnode_t *vp;
	int newnode;
	int index;
	mntinfo4_t *mi = VFTOMI4(vfsp);
	int had_badfh = 0;
	rnode4_t *rp;

	ASSERT(dvp != NULL);

	/* substitute mi_rootfh if a misbehaved server sent a bad "." fh */
	fh = badrootfh_check(fh, nm, mi, &had_badfh);

	index = rtable4hash(fh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	/*
	 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
	 */
	vp = make_rnode4(fh, &rtable4[index], vfsp, nfs4_vnodeops,
	    nfs4_putapage, &newnode, cr);

	rp = VTOR4(vp);
	sv_activate(&vp, dvp, &nm, newnode);
	if (dvp->v_flag & V_XATTRDIR) {
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4ISXATTR;
		mutex_exit(&rp->r_statelock);
	}

	/* if getting a bad file handle, do not cache the attributes. */
	if (had_badfh) {
		rw_exit(&rtable4[index].r_lock);
		return (vp);
	}

	/* r4_do_attrcache() drops the hash bucket lock */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}

/*
 * Hash on address of filehandle object.
 * XXX totally untuned.
 *
 * Note: buckets the shared-filehandle pointer itself (scaled by the
 * object size) into rtable4mask+1 buckets; the fh contents are not
 * examined.
 */

int
rtable4hash(nfs4_sharedfh_t *fh)
{
	return (((uintptr_t)fh / sizeof (*fh)) & rtable4mask);
}

/*
 * Find or create the vnode for the given filehandle and filesystem.
 * *newnode is set to zero if the vnode already existed; non-zero if it had
 * to be created.
 *
 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
 */
5617c478bd9Sstevel@tonic-gate */ 5627c478bd9Sstevel@tonic-gate 5637c478bd9Sstevel@tonic-gate static vnode_t * 5647c478bd9Sstevel@tonic-gate make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp, 5657c478bd9Sstevel@tonic-gate struct vnodeops *vops, 5667c478bd9Sstevel@tonic-gate int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *), 5677c478bd9Sstevel@tonic-gate int *newnode, cred_t *cr) 5687c478bd9Sstevel@tonic-gate { 5697c478bd9Sstevel@tonic-gate rnode4_t *rp; 5707c478bd9Sstevel@tonic-gate rnode4_t *trp; 5717c478bd9Sstevel@tonic-gate vnode_t *vp; 5727c478bd9Sstevel@tonic-gate mntinfo4_t *mi; 5737c478bd9Sstevel@tonic-gate 5747c478bd9Sstevel@tonic-gate ASSERT(RW_READ_HELD(&rhtp->r_lock)); 5757c478bd9Sstevel@tonic-gate 5767c478bd9Sstevel@tonic-gate mi = VFTOMI4(vfsp); 5777c478bd9Sstevel@tonic-gate 5787c478bd9Sstevel@tonic-gate start: 5797c478bd9Sstevel@tonic-gate if ((rp = r4find(rhtp, fh, vfsp)) != NULL) { 5807c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 5817c478bd9Sstevel@tonic-gate *newnode = 0; 5827c478bd9Sstevel@tonic-gate return (vp); 5837c478bd9Sstevel@tonic-gate } 5847c478bd9Sstevel@tonic-gate rw_exit(&rhtp->r_lock); 5857c478bd9Sstevel@tonic-gate 5867c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 5877c478bd9Sstevel@tonic-gate 5887c478bd9Sstevel@tonic-gate if (rp4freelist != NULL && rnode4_new >= nrnode) { 5897c478bd9Sstevel@tonic-gate rp = rp4freelist; 5907c478bd9Sstevel@tonic-gate rp4_rmfree(rp); 5917c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 5927c478bd9Sstevel@tonic-gate 5937c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 5947c478bd9Sstevel@tonic-gate 5957c478bd9Sstevel@tonic-gate if (rp->r_flags & R4HASHED) { 5967c478bd9Sstevel@tonic-gate rw_enter(&rp->r_hashq->r_lock, RW_WRITER); 5977c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 5987c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 5997c478bd9Sstevel@tonic-gate vp->v_count--; 6007c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 
6017c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 6027c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 6037c478bd9Sstevel@tonic-gate goto start; 6047c478bd9Sstevel@tonic-gate } 6057c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6067c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rp); 6077c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 6087c478bd9Sstevel@tonic-gate } 6097c478bd9Sstevel@tonic-gate 6107c478bd9Sstevel@tonic-gate r4inactive(rp, cr); 6117c478bd9Sstevel@tonic-gate 6127c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 6137c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 6147c478bd9Sstevel@tonic-gate vp->v_count--; 6157c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6167c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 6177c478bd9Sstevel@tonic-gate goto start; 6187c478bd9Sstevel@tonic-gate } 6197c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 6207c478bd9Sstevel@tonic-gate vn_invalid(vp); 6217c478bd9Sstevel@tonic-gate 6227c478bd9Sstevel@tonic-gate /* 6237c478bd9Sstevel@tonic-gate * destroy old locks before bzero'ing and 6247c478bd9Sstevel@tonic-gate * recreating the locks below. 6257c478bd9Sstevel@tonic-gate */ 6267c478bd9Sstevel@tonic-gate uninit_rnode4(rp); 6277c478bd9Sstevel@tonic-gate 6287c478bd9Sstevel@tonic-gate /* 6297c478bd9Sstevel@tonic-gate * Make sure that if rnode is recycled then 6307c478bd9Sstevel@tonic-gate * VFS count is decremented properly before 6317c478bd9Sstevel@tonic-gate * reuse. 
6327c478bd9Sstevel@tonic-gate */ 6337c478bd9Sstevel@tonic-gate VFS_RELE(vp->v_vfsp); 6347c478bd9Sstevel@tonic-gate vn_reinit(vp); 6357c478bd9Sstevel@tonic-gate } else { 6367c478bd9Sstevel@tonic-gate vnode_t *new_vp; 6377c478bd9Sstevel@tonic-gate 6387c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 6397c478bd9Sstevel@tonic-gate 6407c478bd9Sstevel@tonic-gate rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP); 6417c478bd9Sstevel@tonic-gate new_vp = vn_alloc(KM_SLEEP); 6427c478bd9Sstevel@tonic-gate 6437c478bd9Sstevel@tonic-gate atomic_add_long((ulong_t *)&rnode4_new, 1); 6447c478bd9Sstevel@tonic-gate #ifdef DEBUG 6457c478bd9Sstevel@tonic-gate clstat4_debug.nrnode.value.ui64++; 6467c478bd9Sstevel@tonic-gate #endif 6477c478bd9Sstevel@tonic-gate vp = new_vp; 6487c478bd9Sstevel@tonic-gate } 6497c478bd9Sstevel@tonic-gate 6507c478bd9Sstevel@tonic-gate bzero(rp, sizeof (*rp)); 6517c478bd9Sstevel@tonic-gate rp->r_vnode = vp; 6527c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL); 6537c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL); 6547c478bd9Sstevel@tonic-gate mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL); 6557c478bd9Sstevel@tonic-gate mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL); 6567c478bd9Sstevel@tonic-gate mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL); 6577c478bd9Sstevel@tonic-gate mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL); 6587c478bd9Sstevel@tonic-gate rp->created_v4 = 0; 6597c478bd9Sstevel@tonic-gate list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t), 6607c478bd9Sstevel@tonic-gate offsetof(nfs4_open_stream_t, os_node)); 6617c478bd9Sstevel@tonic-gate rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head; 6627c478bd9Sstevel@tonic-gate rp->r_lo_head.lo_next_rnode = &rp->r_lo_head; 6637c478bd9Sstevel@tonic-gate cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL); 6647c478bd9Sstevel@tonic-gate cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL); 6657c478bd9Sstevel@tonic-gate 
rp->r_flags = R4READDIRWATTR; 6667c478bd9Sstevel@tonic-gate rp->r_fh = fh; 6677c478bd9Sstevel@tonic-gate rp->r_hashq = rhtp; 6687c478bd9Sstevel@tonic-gate sfh4_hold(rp->r_fh); 6697c478bd9Sstevel@tonic-gate rp->r_server = mi->mi_curr_serv; 6707c478bd9Sstevel@tonic-gate rp->r_deleg_type = OPEN_DELEGATE_NONE; 6717c478bd9Sstevel@tonic-gate rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE; 6727c478bd9Sstevel@tonic-gate nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL); 6737c478bd9Sstevel@tonic-gate 6747c478bd9Sstevel@tonic-gate rddir4_cache_create(rp); 6757c478bd9Sstevel@tonic-gate rp->r_putapage = putapage; 6767c478bd9Sstevel@tonic-gate vn_setops(vp, vops); 6777c478bd9Sstevel@tonic-gate vp->v_data = (caddr_t)rp; 6787c478bd9Sstevel@tonic-gate vp->v_vfsp = vfsp; 6797c478bd9Sstevel@tonic-gate VFS_HOLD(vfsp); 6807c478bd9Sstevel@tonic-gate vp->v_type = VNON; 6817c478bd9Sstevel@tonic-gate if (isrootfh(fh, rp)) 6827c478bd9Sstevel@tonic-gate vp->v_flag = VROOT; 6837c478bd9Sstevel@tonic-gate vn_exists(vp); 6847c478bd9Sstevel@tonic-gate 6857c478bd9Sstevel@tonic-gate /* 6867c478bd9Sstevel@tonic-gate * There is a race condition if someone else 6877c478bd9Sstevel@tonic-gate * alloc's the rnode while no locks are held, so we 6887c478bd9Sstevel@tonic-gate * check again and recover if found. 
6897c478bd9Sstevel@tonic-gate */ 6907c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_WRITER); 6917c478bd9Sstevel@tonic-gate if ((trp = r4find(rhtp, fh, vfsp)) != NULL) { 6927c478bd9Sstevel@tonic-gate vp = RTOV4(trp); 6937c478bd9Sstevel@tonic-gate *newnode = 0; 6947c478bd9Sstevel@tonic-gate rw_exit(&rhtp->r_lock); 6957c478bd9Sstevel@tonic-gate rp4_addfree(rp, cr); 6967c478bd9Sstevel@tonic-gate rw_enter(&rhtp->r_lock, RW_READER); 6977c478bd9Sstevel@tonic-gate return (vp); 6987c478bd9Sstevel@tonic-gate } 6997c478bd9Sstevel@tonic-gate rp4_addhash(rp); 7007c478bd9Sstevel@tonic-gate *newnode = 1; 7017c478bd9Sstevel@tonic-gate return (vp); 7027c478bd9Sstevel@tonic-gate } 7037c478bd9Sstevel@tonic-gate 7047c478bd9Sstevel@tonic-gate static void 7057c478bd9Sstevel@tonic-gate uninit_rnode4(rnode4_t *rp) 7067c478bd9Sstevel@tonic-gate { 7077c478bd9Sstevel@tonic-gate vnode_t *vp = RTOV4(rp); 7087c478bd9Sstevel@tonic-gate 7097c478bd9Sstevel@tonic-gate ASSERT(rp != NULL); 7107c478bd9Sstevel@tonic-gate ASSERT(vp != NULL); 7117c478bd9Sstevel@tonic-gate ASSERT(vp->v_count == 1); 7127c478bd9Sstevel@tonic-gate ASSERT(rp->r_count == 0); 7137c478bd9Sstevel@tonic-gate ASSERT(rp->r_mapcnt == 0); 7147c478bd9Sstevel@tonic-gate if (rp->r_flags & R4LODANGLERS) { 7157c478bd9Sstevel@tonic-gate nfs4_flush_lock_owners(rp); 7167c478bd9Sstevel@tonic-gate } 7177c478bd9Sstevel@tonic-gate ASSERT(rp->r_lo_head.lo_next_rnode == &rp->r_lo_head); 7187c478bd9Sstevel@tonic-gate ASSERT(rp->r_lo_head.lo_prev_rnode == &rp->r_lo_head); 7197c478bd9Sstevel@tonic-gate ASSERT(!(rp->r_flags & R4HASHED)); 7207c478bd9Sstevel@tonic-gate ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL); 7217c478bd9Sstevel@tonic-gate nfs4_clear_open_streams(rp); 7227c478bd9Sstevel@tonic-gate list_destroy(&rp->r_open_streams); 7237c478bd9Sstevel@tonic-gate 7247c478bd9Sstevel@tonic-gate /* 7257c478bd9Sstevel@tonic-gate * Destroy the rddir cache first since we need to grab the r_statelock. 
7267c478bd9Sstevel@tonic-gate */ 7277c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 7287c478bd9Sstevel@tonic-gate rddir4_cache_destroy(rp); 7297c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 7307c478bd9Sstevel@tonic-gate sv_uninit(&rp->r_svnode); 7317c478bd9Sstevel@tonic-gate sfh4_rele(&rp->r_fh); 7327c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_rwlock); 7337c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_lkserlock); 7347c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_statelock); 7357c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_statev4_lock); 7367c478bd9Sstevel@tonic-gate mutex_destroy(&rp->r_os_lock); 7377c478bd9Sstevel@tonic-gate cv_destroy(&rp->r_cv); 7387c478bd9Sstevel@tonic-gate cv_destroy(&rp->r_commit.c_cv); 7397c478bd9Sstevel@tonic-gate nfs_rw_destroy(&rp->r_deleg_recall_lock); 7407c478bd9Sstevel@tonic-gate if (rp->r_flags & R4DELMAPLIST) 7417c478bd9Sstevel@tonic-gate list_destroy(&rp->r_indelmap); 7427c478bd9Sstevel@tonic-gate } 7437c478bd9Sstevel@tonic-gate 7447c478bd9Sstevel@tonic-gate /* 7457c478bd9Sstevel@tonic-gate * Put an rnode on the free list. 7467c478bd9Sstevel@tonic-gate * 7477c478bd9Sstevel@tonic-gate * Rnodes which were allocated above and beyond the normal limit 7487c478bd9Sstevel@tonic-gate * are immediately freed. 
 */
void
rp4_addfree(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	vnode_t *xattr;
	struct vfs *vfsp;

	vp = RTOV4(rp);
	ASSERT(vp->v_count >= 1);
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);

	/*
	 * If we have too many rnodes allocated and there are no
	 * references to this rnode, or if the rnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the rnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
#ifdef DEBUG
	    (nfs4_rnode_nofreelist != 0) ||
#endif
	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
		if (rp->r_flags & R4HASHED) {
			/*
			 * Unhash under the bucket's write lock; if
			 * another thread grabbed a reference while we
			 * weren't looking, just drop ours and let that
			 * thread dispose of the rnode.
			 */
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				return;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		/*
		 * Make sure we don't have a delegation on this rnode
		 * before destroying it.
		 */
		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
			(void) nfs4delegreturn(rp,
			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		}

		r4inactive(rp, cr);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * rnode is not in the rnode hash queues; one
		 * way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the rnode was marked
		 * with R4DIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to r4inactive.  The i/o may have been completed,
		 * thus allowing r4inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the rnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the rnode or placing it on the
		 * rnode freelist.  If there are no other references,
		 * then the rnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		destroy_rnode4(rp);
		return;
	}

	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the rnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this rnode to the freelist.
	 */
again:
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&rp->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Make sure we don't put an rnode with a delegation
	 * on the free list.
	 */
	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
		rw_exit(&rp->r_hashq->r_lock);
		(void) nfs4delegreturn(rp,
		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		goto again;
	}

	/*
	 * Now that we have the hash queue lock, and we know there
	 * are not anymore references on the vnode, check to make
	 * sure there aren't any open streams still on the rnode.
	 * If so, drop the hash queue lock, remove the open streams,
	 * and recheck the v_count.
	 */
	mutex_enter(&rp->r_os_lock);
	if (list_head(&rp->r_open_streams) != NULL) {
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);
		/* Cross-zone callers cannot do over-the-wire closes. */
		if (nfs_zone() != VTOMI4(vp)->mi_zone)
			nfs4_clear_open_streams(rp);
		else
			(void) nfs4close_all(vp, cr);
		goto again;
	}
	mutex_exit(&rp->r_os_lock);

	/*
	 * Before we put it on the freelist, make sure there are no pages.
	 * If there are, flush and commit of all of the dirty and
	 * uncommitted pages, assuming the file system isn't read only.
	 */
	if (!(vp->v_vfsp->vfs_flag & VFS_RDONLY) && nfs4_dross_pages(vp)) {
		rw_exit(&rp->r_hashq->r_lock);
		r4flushpages(rp, cr);
		goto again;
	}

	/*
	 * Before we put it on the freelist, make sure there is no
	 * active xattr directory cached, the freelist will not
	 * have its entries r4inactive'd if there is still an active
	 * rnode, thus nothing in the freelist can hold another
	 * rnode active.
	 */
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * If there is no cached data or metadata for this file, then
	 * put the rnode on the front of the freelist so that it will
	 * be reused before other rnodes which may have cached data or
	 * metadata associated with them.
	 */
	mutex_enter(&rp4freelist_lock);
	if (rp4freelist == NULL) {
		rp->r_freef = rp;
		rp->r_freeb = rp;
		rp4freelist = rp;
	} else {
		rp->r_freef = rp4freelist;
		rp->r_freeb = rp4freelist->r_freeb;
		rp4freelist->r_freeb->r_freef = rp;
		rp4freelist->r_freeb = rp;
		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
		    rp->r_symlink.contents == NULL && rp->r_secattr == NULL)
			rp4freelist = rp;
	}
	mutex_exit(&rp4freelist_lock);

	rw_exit(&rp->r_hashq->r_lock);

	/* Release the cached xattr dir outside of all the locks. */
	if (xattr)
		VN_RELE(xattr);
}

/*
 * Remove an rnode from the free list.
 *
 * The caller must be holding rp4freelist_lock and the rnode
 * must be on the freelist.
9367c478bd9Sstevel@tonic-gate */ 9377c478bd9Sstevel@tonic-gate static void 9387c478bd9Sstevel@tonic-gate rp4_rmfree(rnode4_t *rp) 9397c478bd9Sstevel@tonic-gate { 9407c478bd9Sstevel@tonic-gate 9417c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(&rp4freelist_lock)); 9427c478bd9Sstevel@tonic-gate ASSERT(rp->r_freef != NULL && rp->r_freeb != NULL); 9437c478bd9Sstevel@tonic-gate 9447c478bd9Sstevel@tonic-gate if (rp == rp4freelist) { 9457c478bd9Sstevel@tonic-gate rp4freelist = rp->r_freef; 9467c478bd9Sstevel@tonic-gate if (rp == rp4freelist) 9477c478bd9Sstevel@tonic-gate rp4freelist = NULL; 9487c478bd9Sstevel@tonic-gate } 9497c478bd9Sstevel@tonic-gate rp->r_freeb->r_freef = rp->r_freef; 9507c478bd9Sstevel@tonic-gate rp->r_freef->r_freeb = rp->r_freeb; 9517c478bd9Sstevel@tonic-gate 9527c478bd9Sstevel@tonic-gate rp->r_freef = rp->r_freeb = NULL; 9537c478bd9Sstevel@tonic-gate } 9547c478bd9Sstevel@tonic-gate 9557c478bd9Sstevel@tonic-gate /* 9567c478bd9Sstevel@tonic-gate * Put a rnode in the hash table. 
9577c478bd9Sstevel@tonic-gate * 9587c478bd9Sstevel@tonic-gate * The caller must be holding the exclusive hash queue lock 9597c478bd9Sstevel@tonic-gate */ 9607c478bd9Sstevel@tonic-gate void 9617c478bd9Sstevel@tonic-gate rp4_addhash(rnode4_t *rp) 9627c478bd9Sstevel@tonic-gate { 9637c478bd9Sstevel@tonic-gate ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock)); 9647c478bd9Sstevel@tonic-gate ASSERT(!(rp->r_flags & R4HASHED)); 9657c478bd9Sstevel@tonic-gate 9667c478bd9Sstevel@tonic-gate #ifdef DEBUG 9677c478bd9Sstevel@tonic-gate r4_dup_check(rp, RTOV4(rp)->v_vfsp); 9687c478bd9Sstevel@tonic-gate #endif 9697c478bd9Sstevel@tonic-gate 9707c478bd9Sstevel@tonic-gate rp->r_hashf = rp->r_hashq->r_hashf; 9717c478bd9Sstevel@tonic-gate rp->r_hashq->r_hashf = rp; 9727c478bd9Sstevel@tonic-gate rp->r_hashb = (rnode4_t *)rp->r_hashq; 9737c478bd9Sstevel@tonic-gate rp->r_hashf->r_hashb = rp; 9747c478bd9Sstevel@tonic-gate 9757c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 9767c478bd9Sstevel@tonic-gate rp->r_flags |= R4HASHED; 9777c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 9787c478bd9Sstevel@tonic-gate } 9797c478bd9Sstevel@tonic-gate 9807c478bd9Sstevel@tonic-gate /* 9817c478bd9Sstevel@tonic-gate * Remove a rnode from the hash table. 9827c478bd9Sstevel@tonic-gate * 9837c478bd9Sstevel@tonic-gate * The caller must be holding the hash queue lock. 
 */
void
rp4_rmhash_locked(rnode4_t *rp)
{
	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
	ASSERT(rp->r_flags & R4HASHED);

	/* Unlink from the doubly-linked bucket chain. */
	rp->r_hashb->r_hashf = rp->r_hashf;
	rp->r_hashf->r_hashb = rp->r_hashb;

	/* R4HASHED is protected by r_statelock, not the bucket lock. */
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4HASHED;
	mutex_exit(&rp->r_statelock);
}

/*
 * Remove a rnode from the hash table.
 *
 * The caller must not be holding the hash queue lock.
 */
void
rp4_rmhash(rnode4_t *rp)
{
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
	rp4_rmhash_locked(rp);
	rw_exit(&rp->r_hashq->r_lock);
}

/*
 * Lookup a rnode by fhandle.  Ignores rnodes that had failed recovery.
 * Returns NULL if no match.  If an rnode is returned, the reference count
 * on the master vnode is incremented.
 *
 * The caller must be holding the hash queue lock, either shared or exclusive.
 */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
		vp = RTOV4(rp);
		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {

			/* Skip rnodes whose recovery failed. */
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4RECOVERR) {
				mutex_exit(&rp->r_statelock);
				continue;
			}
			mutex_exit(&rp->r_statelock);
#ifdef DEBUG
			r4_dup_check(rp, vfsp);
#endif
			if (rp->r_freef != NULL) {
				mutex_enter(&rp4freelist_lock);
				/*
				 * If the rnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 * (r_freef is rechecked under the lock
				 * since it may have changed since the
				 * unlocked test above.)
				 */
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
				} else {
					mutex_exit(&rp4freelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);

			/*
			 * if root vnode, set v_flag to indicate that
			 */
			if (isrootfh(fh, rp)) {
				if (!(vp->v_flag & VROOT)) {
					mutex_enter(&vp->v_lock);
					vp->v_flag |= VROOT;
					mutex_exit(&vp->v_lock);
				}
			}
			return (rp);
		}
	}
	return (NULL);
}

/*
 * Lookup an rnode by fhandle.  Just a wrapper for r4find()
 * that assumes the caller hasn't already got the lock
 * on the hash bucket.
10787c478bd9Sstevel@tonic-gate */ 10797c478bd9Sstevel@tonic-gate rnode4_t * 10807c478bd9Sstevel@tonic-gate r4find_unlocked(nfs4_sharedfh_t *fh, struct vfs *vfsp) 10817c478bd9Sstevel@tonic-gate { 10827c478bd9Sstevel@tonic-gate rnode4_t *rp; 10837c478bd9Sstevel@tonic-gate int index; 10847c478bd9Sstevel@tonic-gate 10857c478bd9Sstevel@tonic-gate index = rtable4hash(fh); 10867c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 10877c478bd9Sstevel@tonic-gate rp = r4find(&rtable4[index], fh, vfsp); 10887c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 10897c478bd9Sstevel@tonic-gate 10907c478bd9Sstevel@tonic-gate return (rp); 10917c478bd9Sstevel@tonic-gate } 10927c478bd9Sstevel@tonic-gate 10937c478bd9Sstevel@tonic-gate /* 1094*6962f5b8SThomas Haynes * Return >0 if there is a active vnode belonging to this vfs in the 10957c478bd9Sstevel@tonic-gate * rtable4 cache. 10967c478bd9Sstevel@tonic-gate * 10977c478bd9Sstevel@tonic-gate * Several of these checks are done without holding the usual 10987c478bd9Sstevel@tonic-gate * locks. This is safe because destroy_rtable(), rp_addfree(), 10997c478bd9Sstevel@tonic-gate * etc. will redo the necessary checks before actually destroying 11007c478bd9Sstevel@tonic-gate * any rnodes. 
 */
int
check_rtable4(struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;
	int busy = NFSV4_RTABLE4_OK;
	int index;

	/* Scan every hash bucket for rnodes belonging to vfsp. */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);

		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				/*
				 * Classify why the rnode is busy: held
				 * (not on the freelist), has dirty
				 * pages, or has outstanding r_count
				 * holds.
				 */
				if (rp->r_freef == NULL) {
					busy = NFSV4_RTABLE4_NOT_FREE_LIST;
				} else if (nfs4_has_pages(vp) &&
				    (rp->r_flags & R4DIRTY)) {
					busy = NFSV4_RTABLE4_DIRTY_PAGES;
				} else if (rp->r_count > 0) {
					busy = NFSV4_RTABLE4_POS_R_COUNT;
				}

				if (busy != NFSV4_RTABLE4_OK) {
#ifdef DEBUG
					char *path;

					path = fn_path(rp->r_svnode.sv_name);
					DTRACE_NFSV4_3(rnode__e__debug,
					    int, busy, char *, path,
					    rnode4_t *, rp);
					kmem_free(path, strlen(path)+1);
#endif
					rw_exit(&rtable4[index].r_lock);
					return (busy);
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (busy);
}

/*
 * Destroy inactive vnodes from the hash queues which
 * belong to this vfs.  All of the vnodes should be inactive.
 * It is essential that we destroy all rnodes in case of
 * forced unmount as well as in normal unmount case.
 */

void
destroy_rtable4(struct vfs *vfsp, cred_t *cr)
{
	int index;
	vnode_t *vp;
	rnode4_t *rp, *r_hashf, *rlist;

	rlist = NULL;

	/*
	 * Pass 1: under each bucket's write lock, pull every freelisted
	 * rnode belonging to vfsp off the freelist and hash chain, and
	 * collect them on a private list threaded through r_hashf.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_WRITER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = r_hashf) {
			/* save the hash pointer before destroying */
			r_hashf = rp->r_hashf;

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				mutex_enter(&rp4freelist_lock);
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
					rp4_rmhash_locked(rp);
					rp->r_hashf = rlist;
					rlist = rp;
				} else
					mutex_exit(&rp4freelist_lock);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}

	/* Pass 2: dispose of the collected rnodes with no locks held. */
	for (rp = rlist; rp != NULL; rp = r_hashf) {
		r_hashf = rp->r_hashf;
		/*
		 * This call to rp4_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, cr);
	}
}

/*
 * This routine destroys all the resources of an rnode
 * and finally the rnode itself.
 */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	/* A delegation must have been returned before destruction. */
	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	atomic_add_long((ulong_t *)&rnode4_new, -1);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	vn_invalid(vp);
	vn_free(vp);
	/* Drop the hold taken on the vfs when the rnode was created. */
	VFS_RELE(vfsp);
}

/*
 * Invalidate the attributes on all rnodes forcing the next getattr
 * to go over the wire.  Used to flush stale uid and gid mappings.
 * Maybe done on a per vfsp, or all rnodes (vfsp == NULL)
 */
void
nfs4_rnode_invalidate(struct vfs *vfsp)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp;

	/*
	 * Walk the hash queues looking for rnodes.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/* vfsp == NULL means invalidate every rnode. */
			if (vfsp != NULL && vp->v_vfsp != vfsp)
				continue;

			/*
			 * Best-effort: skip rnodes whose statelock is
			 * contended rather than blocking the walk.
			 */
			if (!mutex_tryenter(&rp->r_statelock))
				continue;

			/*
			 * Expire the attributes by resetting the change
			 * and attr timeout.
			 */
			rp->r_change = 0;
			PURGE_ATTRCACHE4_LOCKED(rp);
			mutex_exit(&rp->r_statelock);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp, **vplist;
	long num, cnt;

	/*
	 * Check to see whether there is anything to do.
12787c478bd9Sstevel@tonic-gate */ 12797c478bd9Sstevel@tonic-gate num = rnode4_new; 12807c478bd9Sstevel@tonic-gate if (num == 0) 12817c478bd9Sstevel@tonic-gate return; 12827c478bd9Sstevel@tonic-gate 12837c478bd9Sstevel@tonic-gate /* 12847c478bd9Sstevel@tonic-gate * Allocate a slot for all currently active rnodes on the 12857c478bd9Sstevel@tonic-gate * supposition that they all may need flushing. 12867c478bd9Sstevel@tonic-gate */ 12877c478bd9Sstevel@tonic-gate vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP); 12887c478bd9Sstevel@tonic-gate cnt = 0; 12897c478bd9Sstevel@tonic-gate 12907c478bd9Sstevel@tonic-gate /* 12917c478bd9Sstevel@tonic-gate * Walk the hash queues looking for rnodes with page 12927c478bd9Sstevel@tonic-gate * lists associated with them. Make a list of these 12937c478bd9Sstevel@tonic-gate * files. 12947c478bd9Sstevel@tonic-gate */ 12957c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 12967c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 12977c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 12987c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 12997c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 13007c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 13017c478bd9Sstevel@tonic-gate /* 13027c478bd9Sstevel@tonic-gate * Don't bother sync'ing a vp if it 13037c478bd9Sstevel@tonic-gate * is part of virtual swap device or 13047c478bd9Sstevel@tonic-gate * if VFS is read-only 13057c478bd9Sstevel@tonic-gate */ 13067c478bd9Sstevel@tonic-gate if (IS_SWAPVP(vp) || vn_is_readonly(vp)) 13077c478bd9Sstevel@tonic-gate continue; 13087c478bd9Sstevel@tonic-gate /* 13097c478bd9Sstevel@tonic-gate * If flushing all mounted file systems or 13107c478bd9Sstevel@tonic-gate * the vnode belongs to this vfs, has pages 13117c478bd9Sstevel@tonic-gate * and is marked as either dirty or mmap'd, 13127c478bd9Sstevel@tonic-gate * hold and add this vnode to the list of 13137c478bd9Sstevel@tonic-gate * vnodes to flush. 
13147c478bd9Sstevel@tonic-gate */ 13157c478bd9Sstevel@tonic-gate if ((vfsp == NULL || vp->v_vfsp == vfsp) && 13167c478bd9Sstevel@tonic-gate nfs4_has_pages(vp) && 13177c478bd9Sstevel@tonic-gate ((rp->r_flags & R4DIRTY) || rp->r_mapcnt > 0)) { 13187c478bd9Sstevel@tonic-gate VN_HOLD(vp); 13197c478bd9Sstevel@tonic-gate vplist[cnt++] = vp; 13207c478bd9Sstevel@tonic-gate if (cnt == num) { 13217c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 13227c478bd9Sstevel@tonic-gate goto toomany; 13237c478bd9Sstevel@tonic-gate } 13247c478bd9Sstevel@tonic-gate } 13257c478bd9Sstevel@tonic-gate } 13267c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 13277c478bd9Sstevel@tonic-gate } 13287c478bd9Sstevel@tonic-gate toomany: 13297c478bd9Sstevel@tonic-gate 13307c478bd9Sstevel@tonic-gate /* 13317c478bd9Sstevel@tonic-gate * Flush and release all of the files on the list. 13327c478bd9Sstevel@tonic-gate */ 13337c478bd9Sstevel@tonic-gate while (cnt-- > 0) { 13347c478bd9Sstevel@tonic-gate vp = vplist[cnt]; 1335da6c28aaSamw (void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr, NULL); 13367c478bd9Sstevel@tonic-gate VN_RELE(vp); 13377c478bd9Sstevel@tonic-gate } 13387c478bd9Sstevel@tonic-gate 13397c478bd9Sstevel@tonic-gate /* 13407c478bd9Sstevel@tonic-gate * Free the space allocated to hold the list. 
13417c478bd9Sstevel@tonic-gate */ 13427c478bd9Sstevel@tonic-gate kmem_free(vplist, num * sizeof (*vplist)); 13437c478bd9Sstevel@tonic-gate } 13447c478bd9Sstevel@tonic-gate 13457c478bd9Sstevel@tonic-gate int 13467c478bd9Sstevel@tonic-gate nfs4_free_data_reclaim(rnode4_t *rp) 13477c478bd9Sstevel@tonic-gate { 13487c478bd9Sstevel@tonic-gate char *contents; 13497c478bd9Sstevel@tonic-gate vnode_t *xattr; 13507c478bd9Sstevel@tonic-gate int size; 13517c478bd9Sstevel@tonic-gate vsecattr_t *vsp; 13527c478bd9Sstevel@tonic-gate int freed; 13537c478bd9Sstevel@tonic-gate bool_t rdc = FALSE; 13547c478bd9Sstevel@tonic-gate 13557c478bd9Sstevel@tonic-gate /* 13567c478bd9Sstevel@tonic-gate * Free any held caches which may 13577c478bd9Sstevel@tonic-gate * be associated with this rnode. 13587c478bd9Sstevel@tonic-gate */ 13597c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 13607c478bd9Sstevel@tonic-gate if (rp->r_dir != NULL) 13617c478bd9Sstevel@tonic-gate rdc = TRUE; 13627c478bd9Sstevel@tonic-gate contents = rp->r_symlink.contents; 13637c478bd9Sstevel@tonic-gate size = rp->r_symlink.size; 13647c478bd9Sstevel@tonic-gate rp->r_symlink.contents = NULL; 13657c478bd9Sstevel@tonic-gate vsp = rp->r_secattr; 13667c478bd9Sstevel@tonic-gate rp->r_secattr = NULL; 13677c478bd9Sstevel@tonic-gate xattr = rp->r_xattr_dir; 13687c478bd9Sstevel@tonic-gate rp->r_xattr_dir = NULL; 13697c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 13707c478bd9Sstevel@tonic-gate 13717c478bd9Sstevel@tonic-gate /* 13727c478bd9Sstevel@tonic-gate * Free the access cache entries. 
13737c478bd9Sstevel@tonic-gate */ 13747c478bd9Sstevel@tonic-gate freed = nfs4_access_purge_rp(rp); 13757c478bd9Sstevel@tonic-gate 13767c478bd9Sstevel@tonic-gate if (rdc == FALSE && contents == NULL && vsp == NULL && xattr == NULL) 13777c478bd9Sstevel@tonic-gate return (freed); 13787c478bd9Sstevel@tonic-gate 13797c478bd9Sstevel@tonic-gate /* 13807c478bd9Sstevel@tonic-gate * Free the readdir cache entries, incompletely if we can't block. 13817c478bd9Sstevel@tonic-gate */ 13827c478bd9Sstevel@tonic-gate nfs4_purge_rddir_cache(RTOV4(rp)); 13837c478bd9Sstevel@tonic-gate 13847c478bd9Sstevel@tonic-gate /* 13857c478bd9Sstevel@tonic-gate * Free the symbolic link cache. 13867c478bd9Sstevel@tonic-gate */ 13877c478bd9Sstevel@tonic-gate if (contents != NULL) { 13887c478bd9Sstevel@tonic-gate 13897c478bd9Sstevel@tonic-gate kmem_free((void *)contents, size); 13907c478bd9Sstevel@tonic-gate } 13917c478bd9Sstevel@tonic-gate 13927c478bd9Sstevel@tonic-gate /* 13937c478bd9Sstevel@tonic-gate * Free any cached ACL. 
13947c478bd9Sstevel@tonic-gate */ 13957c478bd9Sstevel@tonic-gate if (vsp != NULL) 13967c478bd9Sstevel@tonic-gate nfs4_acl_free_cache(vsp); 13977c478bd9Sstevel@tonic-gate 13987c478bd9Sstevel@tonic-gate /* 13997c478bd9Sstevel@tonic-gate * Release the xattr directory vnode 14007c478bd9Sstevel@tonic-gate */ 14017c478bd9Sstevel@tonic-gate if (xattr != NULL) 14027c478bd9Sstevel@tonic-gate VN_RELE(xattr); 14037c478bd9Sstevel@tonic-gate 14047c478bd9Sstevel@tonic-gate return (1); 14057c478bd9Sstevel@tonic-gate } 14067c478bd9Sstevel@tonic-gate 14077c478bd9Sstevel@tonic-gate static int 14087c478bd9Sstevel@tonic-gate nfs4_active_data_reclaim(rnode4_t *rp) 14097c478bd9Sstevel@tonic-gate { 14107c478bd9Sstevel@tonic-gate char *contents; 14117c478bd9Sstevel@tonic-gate vnode_t *xattr; 14127c478bd9Sstevel@tonic-gate int size; 14137c478bd9Sstevel@tonic-gate vsecattr_t *vsp; 14147c478bd9Sstevel@tonic-gate int freed; 14157c478bd9Sstevel@tonic-gate bool_t rdc = FALSE; 14167c478bd9Sstevel@tonic-gate 14177c478bd9Sstevel@tonic-gate /* 14187c478bd9Sstevel@tonic-gate * Free any held credentials and caches which 14197c478bd9Sstevel@tonic-gate * may be associated with this rnode. 
14207c478bd9Sstevel@tonic-gate */ 14217c478bd9Sstevel@tonic-gate if (!mutex_tryenter(&rp->r_statelock)) 14227c478bd9Sstevel@tonic-gate return (0); 14237c478bd9Sstevel@tonic-gate contents = rp->r_symlink.contents; 14247c478bd9Sstevel@tonic-gate size = rp->r_symlink.size; 14257c478bd9Sstevel@tonic-gate rp->r_symlink.contents = NULL; 14267c478bd9Sstevel@tonic-gate vsp = rp->r_secattr; 14277c478bd9Sstevel@tonic-gate rp->r_secattr = NULL; 14287c478bd9Sstevel@tonic-gate if (rp->r_dir != NULL) 14297c478bd9Sstevel@tonic-gate rdc = TRUE; 14307c478bd9Sstevel@tonic-gate xattr = rp->r_xattr_dir; 14317c478bd9Sstevel@tonic-gate rp->r_xattr_dir = NULL; 14327c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 14337c478bd9Sstevel@tonic-gate 14347c478bd9Sstevel@tonic-gate /* 14357c478bd9Sstevel@tonic-gate * Free the access cache entries. 14367c478bd9Sstevel@tonic-gate */ 14377c478bd9Sstevel@tonic-gate freed = nfs4_access_purge_rp(rp); 14387c478bd9Sstevel@tonic-gate 14397c478bd9Sstevel@tonic-gate if (contents == NULL && vsp == NULL && rdc == FALSE && xattr == NULL) 14407c478bd9Sstevel@tonic-gate return (freed); 14417c478bd9Sstevel@tonic-gate 14427c478bd9Sstevel@tonic-gate /* 14437c478bd9Sstevel@tonic-gate * Free the symbolic link cache. 14447c478bd9Sstevel@tonic-gate */ 14457c478bd9Sstevel@tonic-gate if (contents != NULL) { 14467c478bd9Sstevel@tonic-gate 14477c478bd9Sstevel@tonic-gate kmem_free((void *)contents, size); 14487c478bd9Sstevel@tonic-gate } 14497c478bd9Sstevel@tonic-gate 14507c478bd9Sstevel@tonic-gate /* 14517c478bd9Sstevel@tonic-gate * Free any cached ACL. 
14527c478bd9Sstevel@tonic-gate */ 14537c478bd9Sstevel@tonic-gate if (vsp != NULL) 14547c478bd9Sstevel@tonic-gate nfs4_acl_free_cache(vsp); 14557c478bd9Sstevel@tonic-gate 14567c478bd9Sstevel@tonic-gate nfs4_purge_rddir_cache(RTOV4(rp)); 14577c478bd9Sstevel@tonic-gate 14587c478bd9Sstevel@tonic-gate /* 14597c478bd9Sstevel@tonic-gate * Release the xattr directory vnode 14607c478bd9Sstevel@tonic-gate */ 14617c478bd9Sstevel@tonic-gate if (xattr != NULL) 14627c478bd9Sstevel@tonic-gate VN_RELE(xattr); 14637c478bd9Sstevel@tonic-gate 14647c478bd9Sstevel@tonic-gate return (1); 14657c478bd9Sstevel@tonic-gate } 14667c478bd9Sstevel@tonic-gate 14677c478bd9Sstevel@tonic-gate static int 14687c478bd9Sstevel@tonic-gate nfs4_free_reclaim(void) 14697c478bd9Sstevel@tonic-gate { 14707c478bd9Sstevel@tonic-gate int freed; 14717c478bd9Sstevel@tonic-gate rnode4_t *rp; 14727c478bd9Sstevel@tonic-gate 14737c478bd9Sstevel@tonic-gate #ifdef DEBUG 14747c478bd9Sstevel@tonic-gate clstat4_debug.f_reclaim.value.ui64++; 14757c478bd9Sstevel@tonic-gate #endif 14767c478bd9Sstevel@tonic-gate freed = 0; 14777c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 14787c478bd9Sstevel@tonic-gate rp = rp4freelist; 14797c478bd9Sstevel@tonic-gate if (rp != NULL) { 14807c478bd9Sstevel@tonic-gate do { 14817c478bd9Sstevel@tonic-gate if (nfs4_free_data_reclaim(rp)) 14827c478bd9Sstevel@tonic-gate freed = 1; 14837c478bd9Sstevel@tonic-gate } while ((rp = rp->r_freef) != rp4freelist); 14847c478bd9Sstevel@tonic-gate } 14857c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 14867c478bd9Sstevel@tonic-gate return (freed); 14877c478bd9Sstevel@tonic-gate } 14887c478bd9Sstevel@tonic-gate 14897c478bd9Sstevel@tonic-gate static int 14907c478bd9Sstevel@tonic-gate nfs4_active_reclaim(void) 14917c478bd9Sstevel@tonic-gate { 14927c478bd9Sstevel@tonic-gate int freed; 14937c478bd9Sstevel@tonic-gate int index; 14947c478bd9Sstevel@tonic-gate rnode4_t *rp; 14957c478bd9Sstevel@tonic-gate 14967c478bd9Sstevel@tonic-gate #ifdef DEBUG 
14977c478bd9Sstevel@tonic-gate clstat4_debug.a_reclaim.value.ui64++; 14987c478bd9Sstevel@tonic-gate #endif 14997c478bd9Sstevel@tonic-gate freed = 0; 15007c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 15017c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 15027c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 15037c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 15047c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 15057c478bd9Sstevel@tonic-gate if (nfs4_active_data_reclaim(rp)) 15067c478bd9Sstevel@tonic-gate freed = 1; 15077c478bd9Sstevel@tonic-gate } 15087c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 15097c478bd9Sstevel@tonic-gate } 15107c478bd9Sstevel@tonic-gate return (freed); 15117c478bd9Sstevel@tonic-gate } 15127c478bd9Sstevel@tonic-gate 15137c478bd9Sstevel@tonic-gate static int 15147c478bd9Sstevel@tonic-gate nfs4_rnode_reclaim(void) 15157c478bd9Sstevel@tonic-gate { 15167c478bd9Sstevel@tonic-gate int freed; 15177c478bd9Sstevel@tonic-gate rnode4_t *rp; 15187c478bd9Sstevel@tonic-gate vnode_t *vp; 15197c478bd9Sstevel@tonic-gate 15207c478bd9Sstevel@tonic-gate #ifdef DEBUG 15217c478bd9Sstevel@tonic-gate clstat4_debug.r_reclaim.value.ui64++; 15227c478bd9Sstevel@tonic-gate #endif 15237c478bd9Sstevel@tonic-gate freed = 0; 15247c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 15257c478bd9Sstevel@tonic-gate while ((rp = rp4freelist) != NULL) { 15267c478bd9Sstevel@tonic-gate rp4_rmfree(rp); 15277c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 15287c478bd9Sstevel@tonic-gate if (rp->r_flags & R4HASHED) { 15297c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 15307c478bd9Sstevel@tonic-gate rw_enter(&rp->r_hashq->r_lock, RW_WRITER); 15317c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 15327c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 15337c478bd9Sstevel@tonic-gate vp->v_count--; 15347c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 15357c478bd9Sstevel@tonic-gate 
rw_exit(&rp->r_hashq->r_lock); 15367c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 15377c478bd9Sstevel@tonic-gate continue; 15387c478bd9Sstevel@tonic-gate } 15397c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 15407c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rp); 15417c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 15427c478bd9Sstevel@tonic-gate } 15437c478bd9Sstevel@tonic-gate /* 15447c478bd9Sstevel@tonic-gate * This call to rp_addfree will end up destroying the 15457c478bd9Sstevel@tonic-gate * rnode, but in a safe way with the appropriate set 15467c478bd9Sstevel@tonic-gate * of checks done. 15477c478bd9Sstevel@tonic-gate */ 15487c478bd9Sstevel@tonic-gate rp4_addfree(rp, CRED()); 15497c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 15507c478bd9Sstevel@tonic-gate } 15517c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 15527c478bd9Sstevel@tonic-gate return (freed); 15537c478bd9Sstevel@tonic-gate } 15547c478bd9Sstevel@tonic-gate 15557c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 15567c478bd9Sstevel@tonic-gate static void 15577c478bd9Sstevel@tonic-gate nfs4_reclaim(void *cdrarg) 15587c478bd9Sstevel@tonic-gate { 15597c478bd9Sstevel@tonic-gate #ifdef DEBUG 15607c478bd9Sstevel@tonic-gate clstat4_debug.reclaim.value.ui64++; 15617c478bd9Sstevel@tonic-gate #endif 15627c478bd9Sstevel@tonic-gate if (nfs4_free_reclaim()) 15637c478bd9Sstevel@tonic-gate return; 15647c478bd9Sstevel@tonic-gate 15657c478bd9Sstevel@tonic-gate if (nfs4_active_reclaim()) 15667c478bd9Sstevel@tonic-gate return; 15677c478bd9Sstevel@tonic-gate 15687c478bd9Sstevel@tonic-gate (void) nfs4_rnode_reclaim(); 15697c478bd9Sstevel@tonic-gate } 15707c478bd9Sstevel@tonic-gate 15717c478bd9Sstevel@tonic-gate /* 15727c478bd9Sstevel@tonic-gate * Returns the clientid4 to use for the given mntinfo4. Note that the 15737c478bd9Sstevel@tonic-gate * clientid can change if the caller drops mi_recovlock. 
15747c478bd9Sstevel@tonic-gate */ 15757c478bd9Sstevel@tonic-gate 15767c478bd9Sstevel@tonic-gate clientid4 15777c478bd9Sstevel@tonic-gate mi2clientid(mntinfo4_t *mi) 15787c478bd9Sstevel@tonic-gate { 15797c478bd9Sstevel@tonic-gate nfs4_server_t *sp; 15807c478bd9Sstevel@tonic-gate clientid4 clientid = 0; 15817c478bd9Sstevel@tonic-gate 15827c478bd9Sstevel@tonic-gate /* this locks down sp if it is found */ 15837c478bd9Sstevel@tonic-gate sp = find_nfs4_server(mi); 15847c478bd9Sstevel@tonic-gate if (sp != NULL) { 15857c478bd9Sstevel@tonic-gate clientid = sp->clientid; 15867c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 15877c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 15887c478bd9Sstevel@tonic-gate } 15897c478bd9Sstevel@tonic-gate return (clientid); 15907c478bd9Sstevel@tonic-gate } 15917c478bd9Sstevel@tonic-gate 15927c478bd9Sstevel@tonic-gate /* 15937c478bd9Sstevel@tonic-gate * Return the current lease time for the server associated with the given 15947c478bd9Sstevel@tonic-gate * file. Note that the lease time could change immediately after this 15957c478bd9Sstevel@tonic-gate * call. 
15967c478bd9Sstevel@tonic-gate */ 15977c478bd9Sstevel@tonic-gate 15987c478bd9Sstevel@tonic-gate time_t 15997c478bd9Sstevel@tonic-gate r2lease_time(rnode4_t *rp) 16007c478bd9Sstevel@tonic-gate { 16017c478bd9Sstevel@tonic-gate nfs4_server_t *sp; 16027c478bd9Sstevel@tonic-gate time_t lease_time; 16037c478bd9Sstevel@tonic-gate mntinfo4_t *mi = VTOMI4(RTOV4(rp)); 16047c478bd9Sstevel@tonic-gate 16057c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0); 16067c478bd9Sstevel@tonic-gate 16077c478bd9Sstevel@tonic-gate /* this locks down sp if it is found */ 16087c478bd9Sstevel@tonic-gate sp = find_nfs4_server(VTOMI4(RTOV4(rp))); 16097c478bd9Sstevel@tonic-gate 16107c478bd9Sstevel@tonic-gate if (VTOMI4(RTOV4(rp))->mi_vfsp->vfs_flag & VFS_UNMOUNTED) { 16117c478bd9Sstevel@tonic-gate if (sp != NULL) { 16127c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 16137c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 16147c478bd9Sstevel@tonic-gate } 16157c478bd9Sstevel@tonic-gate nfs_rw_exit(&mi->mi_recovlock); 16167c478bd9Sstevel@tonic-gate return (1); /* 1 second */ 16177c478bd9Sstevel@tonic-gate } 16187c478bd9Sstevel@tonic-gate 16197c478bd9Sstevel@tonic-gate ASSERT(sp != NULL); 16207c478bd9Sstevel@tonic-gate 16217c478bd9Sstevel@tonic-gate lease_time = sp->s_lease_time; 16227c478bd9Sstevel@tonic-gate 16237c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 16247c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 16257c478bd9Sstevel@tonic-gate nfs_rw_exit(&mi->mi_recovlock); 16267c478bd9Sstevel@tonic-gate 16277c478bd9Sstevel@tonic-gate return (lease_time); 16287c478bd9Sstevel@tonic-gate } 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate /* 16317c478bd9Sstevel@tonic-gate * Return a list with information about all the known open instances for 16327c478bd9Sstevel@tonic-gate * a filesystem. The caller must call r4releopenlist() when done with the 16337c478bd9Sstevel@tonic-gate * list. 
16347c478bd9Sstevel@tonic-gate * 16357c478bd9Sstevel@tonic-gate * We are safe at looking at os_valid and os_pending_close across dropping 16367c478bd9Sstevel@tonic-gate * the 'os_sync_lock' to count up the number of open streams and then 16377c478bd9Sstevel@tonic-gate * allocate memory for the osp list due to: 16387c478bd9Sstevel@tonic-gate * -Looking at os_pending_close is safe since this routine is 16397c478bd9Sstevel@tonic-gate * only called via recovery, and os_pending_close can only be set via 16407c478bd9Sstevel@tonic-gate * a non-recovery operation (which are all blocked when recovery 16417c478bd9Sstevel@tonic-gate * is active). 16427c478bd9Sstevel@tonic-gate * 16437c478bd9Sstevel@tonic-gate * -Examining os_valid is safe since non-recovery operations, which 16447c478bd9Sstevel@tonic-gate * could potentially switch os_valid to 0, are blocked (via 16457c478bd9Sstevel@tonic-gate * nfs4_start_fop) and recovery is single-threaded per mntinfo4_t 16467c478bd9Sstevel@tonic-gate * (which means we are the only recovery thread potentially acting 16477c478bd9Sstevel@tonic-gate * on this open stream). 
16487c478bd9Sstevel@tonic-gate */ 16497c478bd9Sstevel@tonic-gate 16507c478bd9Sstevel@tonic-gate nfs4_opinst_t * 16517c478bd9Sstevel@tonic-gate r4mkopenlist(mntinfo4_t *mi) 16527c478bd9Sstevel@tonic-gate { 16537c478bd9Sstevel@tonic-gate nfs4_opinst_t *reopenlist, *rep; 16547c478bd9Sstevel@tonic-gate rnode4_t *rp; 16557c478bd9Sstevel@tonic-gate vnode_t *vp; 16567c478bd9Sstevel@tonic-gate vfs_t *vfsp = mi->mi_vfsp; 16577c478bd9Sstevel@tonic-gate int numosp; 16587c478bd9Sstevel@tonic-gate nfs4_open_stream_t *osp; 16597c478bd9Sstevel@tonic-gate int index; 16607c478bd9Sstevel@tonic-gate open_delegation_type4 dtype; 16617c478bd9Sstevel@tonic-gate int hold_vnode; 16627c478bd9Sstevel@tonic-gate 16637c478bd9Sstevel@tonic-gate reopenlist = NULL; 16647c478bd9Sstevel@tonic-gate 16657c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 16667c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 16677c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 16687c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 16697c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 16707c478bd9Sstevel@tonic-gate 16717c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 16727c478bd9Sstevel@tonic-gate if (vp->v_vfsp != vfsp) 16737c478bd9Sstevel@tonic-gate continue; 16747c478bd9Sstevel@tonic-gate hold_vnode = 0; 16757c478bd9Sstevel@tonic-gate 16767c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_os_lock); 16777c478bd9Sstevel@tonic-gate 16787c478bd9Sstevel@tonic-gate /* Count the number of valid open_streams of the file */ 16797c478bd9Sstevel@tonic-gate numosp = 0; 16807c478bd9Sstevel@tonic-gate for (osp = list_head(&rp->r_open_streams); osp != NULL; 16817c478bd9Sstevel@tonic-gate osp = list_next(&rp->r_open_streams, osp)) { 16827c478bd9Sstevel@tonic-gate mutex_enter(&osp->os_sync_lock); 16837c478bd9Sstevel@tonic-gate if (osp->os_valid && !osp->os_pending_close) 16847c478bd9Sstevel@tonic-gate numosp++; 16857c478bd9Sstevel@tonic-gate mutex_exit(&osp->os_sync_lock); 
16867c478bd9Sstevel@tonic-gate } 16877c478bd9Sstevel@tonic-gate 16887c478bd9Sstevel@tonic-gate /* Fill in the valid open streams per vp */ 16897c478bd9Sstevel@tonic-gate if (numosp > 0) { 16907c478bd9Sstevel@tonic-gate int j; 16917c478bd9Sstevel@tonic-gate 16927c478bd9Sstevel@tonic-gate hold_vnode = 1; 16937c478bd9Sstevel@tonic-gate 16947c478bd9Sstevel@tonic-gate /* 16957c478bd9Sstevel@tonic-gate * Add a new open instance to the list 16967c478bd9Sstevel@tonic-gate */ 16977c478bd9Sstevel@tonic-gate rep = kmem_zalloc(sizeof (*reopenlist), 1698b9238976Sth KM_SLEEP); 16997c478bd9Sstevel@tonic-gate rep->re_next = reopenlist; 17007c478bd9Sstevel@tonic-gate reopenlist = rep; 17017c478bd9Sstevel@tonic-gate 17027c478bd9Sstevel@tonic-gate rep->re_vp = vp; 17037c478bd9Sstevel@tonic-gate rep->re_osp = kmem_zalloc( 1704b9238976Sth numosp * sizeof (*(rep->re_osp)), 1705b9238976Sth KM_SLEEP); 17067c478bd9Sstevel@tonic-gate rep->re_numosp = numosp; 17077c478bd9Sstevel@tonic-gate 17087c478bd9Sstevel@tonic-gate j = 0; 17097c478bd9Sstevel@tonic-gate for (osp = list_head(&rp->r_open_streams); 17107c478bd9Sstevel@tonic-gate osp != NULL; 17117c478bd9Sstevel@tonic-gate osp = list_next(&rp->r_open_streams, osp)) { 17127c478bd9Sstevel@tonic-gate 17137c478bd9Sstevel@tonic-gate mutex_enter(&osp->os_sync_lock); 17147c478bd9Sstevel@tonic-gate if (osp->os_valid && 17157c478bd9Sstevel@tonic-gate !osp->os_pending_close) { 17167c478bd9Sstevel@tonic-gate osp->os_ref_count++; 17177c478bd9Sstevel@tonic-gate rep->re_osp[j] = osp; 17187c478bd9Sstevel@tonic-gate j++; 17197c478bd9Sstevel@tonic-gate } 17207c478bd9Sstevel@tonic-gate mutex_exit(&osp->os_sync_lock); 17217c478bd9Sstevel@tonic-gate } 17227c478bd9Sstevel@tonic-gate /* 17237c478bd9Sstevel@tonic-gate * Assuming valid osp(s) stays valid between 17247c478bd9Sstevel@tonic-gate * the time obtaining j and numosp. 
17257c478bd9Sstevel@tonic-gate */ 17267c478bd9Sstevel@tonic-gate ASSERT(j == numosp); 17277c478bd9Sstevel@tonic-gate } 17287c478bd9Sstevel@tonic-gate 17297c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_os_lock); 17307c478bd9Sstevel@tonic-gate /* do this here to keep v_lock > r_os_lock */ 17317c478bd9Sstevel@tonic-gate if (hold_vnode) 17327c478bd9Sstevel@tonic-gate VN_HOLD(vp); 17337c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statev4_lock); 17347c478bd9Sstevel@tonic-gate if (rp->r_deleg_type != OPEN_DELEGATE_NONE) { 17357c478bd9Sstevel@tonic-gate /* 17367c478bd9Sstevel@tonic-gate * If this rnode holds a delegation, 17377c478bd9Sstevel@tonic-gate * but if there are no valid open streams, 17387c478bd9Sstevel@tonic-gate * then just discard the delegation 17397c478bd9Sstevel@tonic-gate * without doing delegreturn. 17407c478bd9Sstevel@tonic-gate */ 17417c478bd9Sstevel@tonic-gate if (numosp > 0) 17427c478bd9Sstevel@tonic-gate rp->r_deleg_needs_recovery = 1743b9238976Sth rp->r_deleg_type; 17447c478bd9Sstevel@tonic-gate } 17457c478bd9Sstevel@tonic-gate /* Save the delegation type for use outside the lock */ 17467c478bd9Sstevel@tonic-gate dtype = rp->r_deleg_type; 17477c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statev4_lock); 17487c478bd9Sstevel@tonic-gate 17497c478bd9Sstevel@tonic-gate /* 17507c478bd9Sstevel@tonic-gate * If we have a delegation then get rid of it. 17517c478bd9Sstevel@tonic-gate * We've set rp->r_deleg_needs_recovery so we have 17527c478bd9Sstevel@tonic-gate * enough information to recover. 
17537c478bd9Sstevel@tonic-gate */ 17547c478bd9Sstevel@tonic-gate if (dtype != OPEN_DELEGATE_NONE) { 17557c478bd9Sstevel@tonic-gate (void) nfs4delegreturn(rp, NFS4_DR_DISCARD); 17567c478bd9Sstevel@tonic-gate } 17577c478bd9Sstevel@tonic-gate } 17587c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 17597c478bd9Sstevel@tonic-gate } 17607c478bd9Sstevel@tonic-gate return (reopenlist); 17617c478bd9Sstevel@tonic-gate } 17627c478bd9Sstevel@tonic-gate 17637c478bd9Sstevel@tonic-gate /* 17647c478bd9Sstevel@tonic-gate * Release the list of open instance references. 17657c478bd9Sstevel@tonic-gate */ 17667c478bd9Sstevel@tonic-gate 17677c478bd9Sstevel@tonic-gate void 17687c478bd9Sstevel@tonic-gate r4releopenlist(nfs4_opinst_t *reopenp) 17697c478bd9Sstevel@tonic-gate { 17707c478bd9Sstevel@tonic-gate nfs4_opinst_t *rep, *next; 17717c478bd9Sstevel@tonic-gate int i; 17727c478bd9Sstevel@tonic-gate 17737c478bd9Sstevel@tonic-gate for (rep = reopenp; rep; rep = next) { 17747c478bd9Sstevel@tonic-gate next = rep->re_next; 17757c478bd9Sstevel@tonic-gate 17767c478bd9Sstevel@tonic-gate for (i = 0; i < rep->re_numosp; i++) 1777b9238976Sth open_stream_rele(rep->re_osp[i], VTOR4(rep->re_vp)); 17787c478bd9Sstevel@tonic-gate 17797c478bd9Sstevel@tonic-gate VN_RELE(rep->re_vp); 17807c478bd9Sstevel@tonic-gate kmem_free(rep->re_osp, 17817c478bd9Sstevel@tonic-gate rep->re_numosp * sizeof (*(rep->re_osp))); 17827c478bd9Sstevel@tonic-gate 17837c478bd9Sstevel@tonic-gate kmem_free(rep, sizeof (*rep)); 17847c478bd9Sstevel@tonic-gate } 17857c478bd9Sstevel@tonic-gate } 17867c478bd9Sstevel@tonic-gate 17877c478bd9Sstevel@tonic-gate int 17887c478bd9Sstevel@tonic-gate nfs4_rnode_init(void) 17897c478bd9Sstevel@tonic-gate { 17907c478bd9Sstevel@tonic-gate ulong_t nrnode4_max; 17917c478bd9Sstevel@tonic-gate int i; 17927c478bd9Sstevel@tonic-gate 17937c478bd9Sstevel@tonic-gate /* 17947c478bd9Sstevel@tonic-gate * Compute the size of the rnode4 hash table 17957c478bd9Sstevel@tonic-gate */ 
17967c478bd9Sstevel@tonic-gate if (nrnode <= 0) 17977c478bd9Sstevel@tonic-gate nrnode = ncsize; 17987c478bd9Sstevel@tonic-gate nrnode4_max = 17997c478bd9Sstevel@tonic-gate (ulong_t)((kmem_maxavail() >> 2) / sizeof (struct rnode4)); 18007c478bd9Sstevel@tonic-gate if (nrnode > nrnode4_max || (nrnode == 0 && ncsize == 0)) { 18017c478bd9Sstevel@tonic-gate zcmn_err(GLOBAL_ZONEID, CE_NOTE, 18027c478bd9Sstevel@tonic-gate "setting nrnode to max value of %ld", nrnode4_max); 18037c478bd9Sstevel@tonic-gate nrnode = nrnode4_max; 18047c478bd9Sstevel@tonic-gate } 18057c478bd9Sstevel@tonic-gate rtable4size = 1 << highbit(nrnode / rnode4_hashlen); 18067c478bd9Sstevel@tonic-gate rtable4mask = rtable4size - 1; 18077c478bd9Sstevel@tonic-gate 18087c478bd9Sstevel@tonic-gate /* 18097c478bd9Sstevel@tonic-gate * Allocate and initialize the hash buckets 18107c478bd9Sstevel@tonic-gate */ 18117c478bd9Sstevel@tonic-gate rtable4 = kmem_alloc(rtable4size * sizeof (*rtable4), KM_SLEEP); 18127c478bd9Sstevel@tonic-gate for (i = 0; i < rtable4size; i++) { 18137c478bd9Sstevel@tonic-gate rtable4[i].r_hashf = (rnode4_t *)(&rtable4[i]); 18147c478bd9Sstevel@tonic-gate rtable4[i].r_hashb = (rnode4_t *)(&rtable4[i]); 18157c478bd9Sstevel@tonic-gate rw_init(&rtable4[i].r_lock, NULL, RW_DEFAULT, NULL); 18167c478bd9Sstevel@tonic-gate } 18177c478bd9Sstevel@tonic-gate 18187c478bd9Sstevel@tonic-gate rnode4_cache = kmem_cache_create("rnode4_cache", sizeof (rnode4_t), 18197c478bd9Sstevel@tonic-gate 0, NULL, NULL, nfs4_reclaim, NULL, NULL, 0); 18207c478bd9Sstevel@tonic-gate 18217c478bd9Sstevel@tonic-gate return (0); 18227c478bd9Sstevel@tonic-gate } 18237c478bd9Sstevel@tonic-gate 18247c478bd9Sstevel@tonic-gate int 18257c478bd9Sstevel@tonic-gate nfs4_rnode_fini(void) 18267c478bd9Sstevel@tonic-gate { 18277c478bd9Sstevel@tonic-gate int i; 18287c478bd9Sstevel@tonic-gate 18297c478bd9Sstevel@tonic-gate /* 18307c478bd9Sstevel@tonic-gate * Deallocate the rnode hash queues 18317c478bd9Sstevel@tonic-gate */ 
18327c478bd9Sstevel@tonic-gate kmem_cache_destroy(rnode4_cache); 18337c478bd9Sstevel@tonic-gate 18347c478bd9Sstevel@tonic-gate for (i = 0; i < rtable4size; i++) 18357c478bd9Sstevel@tonic-gate rw_destroy(&rtable4[i].r_lock); 18367c478bd9Sstevel@tonic-gate 18377c478bd9Sstevel@tonic-gate kmem_free(rtable4, rtable4size * sizeof (*rtable4)); 18387c478bd9Sstevel@tonic-gate 18397c478bd9Sstevel@tonic-gate return (0); 18407c478bd9Sstevel@tonic-gate } 18417c478bd9Sstevel@tonic-gate 18427c478bd9Sstevel@tonic-gate /* 18437c478bd9Sstevel@tonic-gate * Return non-zero if the given filehandle refers to the root filehandle 18447c478bd9Sstevel@tonic-gate * for the given rnode. 18457c478bd9Sstevel@tonic-gate */ 18467c478bd9Sstevel@tonic-gate 18477c478bd9Sstevel@tonic-gate static int 18487c478bd9Sstevel@tonic-gate isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp) 18497c478bd9Sstevel@tonic-gate { 18507c478bd9Sstevel@tonic-gate int isroot; 18517c478bd9Sstevel@tonic-gate 18527c478bd9Sstevel@tonic-gate isroot = 0; 18537c478bd9Sstevel@tonic-gate if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh)) 18547c478bd9Sstevel@tonic-gate isroot = 1; 18557c478bd9Sstevel@tonic-gate 18567c478bd9Sstevel@tonic-gate return (isroot); 18577c478bd9Sstevel@tonic-gate } 18587c478bd9Sstevel@tonic-gate 1859b9238976Sth /* 1860b9238976Sth * The r4_stub_* routines assume that the rnode is newly activated, and 1861b9238976Sth * that the caller either holds the hash bucket r_lock for this rnode as 1862b9238976Sth * RW_WRITER, or holds r_statelock. 1863b9238976Sth */ 1864b9238976Sth static void 1865b9238976Sth r4_stub_set(rnode4_t *rp, nfs4_stub_type_t type) 1866b9238976Sth { 1867b9238976Sth vnode_t *vp = RTOV4(rp); 1868b9238976Sth krwlock_t *hash_lock = &rp->r_hashq->r_lock; 1869b9238976Sth 1870b9238976Sth ASSERT(RW_WRITE_HELD(hash_lock) || MUTEX_HELD(&rp->r_statelock)); 1871b9238976Sth 1872b9238976Sth rp->r_stub_type = type; 1873b9238976Sth 1874b9238976Sth /* 1875b9238976Sth * Safely switch this vnode to the trigger vnodeops. 
1876b9238976Sth * 1877b9238976Sth * Currently, we don't ever switch a trigger vnode back to using 1878b9238976Sth * "regular" v4 vnodeops. NFS4_STUB_NONE is only used to note that 1879b9238976Sth * a new v4 object is not a trigger, and it will already have the 1880b9238976Sth * correct v4 vnodeops by default. So, no "else" case required here. 1881b9238976Sth */ 1882b9238976Sth if (type != NFS4_STUB_NONE) 1883b9238976Sth vn_setops(vp, nfs4_trigger_vnodeops); 1884b9238976Sth } 1885b9238976Sth 1886b9238976Sth void 1887b9238976Sth r4_stub_mirrormount(rnode4_t *rp) 1888b9238976Sth { 1889b9238976Sth r4_stub_set(rp, NFS4_STUB_MIRRORMOUNT); 1890b9238976Sth } 1891b9238976Sth 1892b9238976Sth void 1893b9238976Sth r4_stub_none(rnode4_t *rp) 1894b9238976Sth { 1895b9238976Sth r4_stub_set(rp, NFS4_STUB_NONE); 1896b9238976Sth } 1897b9238976Sth 18987c478bd9Sstevel@tonic-gate #ifdef DEBUG 18997c478bd9Sstevel@tonic-gate 19007c478bd9Sstevel@tonic-gate /* 19017c478bd9Sstevel@tonic-gate * Look in the rnode table for other rnodes that have the same filehandle. 
19027c478bd9Sstevel@tonic-gate * Assume the lock is held for the hash chain of checkrp 19037c478bd9Sstevel@tonic-gate */ 19047c478bd9Sstevel@tonic-gate 19057c478bd9Sstevel@tonic-gate static void 19067c478bd9Sstevel@tonic-gate r4_dup_check(rnode4_t *checkrp, vfs_t *vfsp) 19077c478bd9Sstevel@tonic-gate { 19087c478bd9Sstevel@tonic-gate rnode4_t *rp; 19097c478bd9Sstevel@tonic-gate vnode_t *tvp; 19107c478bd9Sstevel@tonic-gate nfs4_fhandle_t fh, fh2; 19117c478bd9Sstevel@tonic-gate int index; 19127c478bd9Sstevel@tonic-gate 19137c478bd9Sstevel@tonic-gate if (!r4_check_for_dups) 19147c478bd9Sstevel@tonic-gate return; 19157c478bd9Sstevel@tonic-gate 19167c478bd9Sstevel@tonic-gate ASSERT(RW_LOCK_HELD(&checkrp->r_hashq->r_lock)); 19177c478bd9Sstevel@tonic-gate 19187c478bd9Sstevel@tonic-gate sfh4_copyval(checkrp->r_fh, &fh); 19197c478bd9Sstevel@tonic-gate 19207c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 19217c478bd9Sstevel@tonic-gate 19227c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 19237c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 19247c478bd9Sstevel@tonic-gate 19257c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 19267c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 19277c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 19287c478bd9Sstevel@tonic-gate 19297c478bd9Sstevel@tonic-gate if (rp == checkrp) 19307c478bd9Sstevel@tonic-gate continue; 19317c478bd9Sstevel@tonic-gate 19327c478bd9Sstevel@tonic-gate tvp = RTOV4(rp); 19337c478bd9Sstevel@tonic-gate if (tvp->v_vfsp != vfsp) 19347c478bd9Sstevel@tonic-gate continue; 19357c478bd9Sstevel@tonic-gate 19367c478bd9Sstevel@tonic-gate sfh4_copyval(rp->r_fh, &fh2); 19377c478bd9Sstevel@tonic-gate if (nfs4cmpfhandle(&fh, &fh2) == 0) { 19387c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "rnodes with same fs, fh " 19397c478bd9Sstevel@tonic-gate "(%p, %p)", (void *)checkrp, (void *)rp); 19407c478bd9Sstevel@tonic-gate } 19417c478bd9Sstevel@tonic-gate 
} 19427c478bd9Sstevel@tonic-gate 19437c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 19447c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 19457c478bd9Sstevel@tonic-gate } 19467c478bd9Sstevel@tonic-gate } 19477c478bd9Sstevel@tonic-gate 19487c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 1949