17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 57c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 67c478bd9Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 77c478bd9Sstevel@tonic-gate * with the License. 87c478bd9Sstevel@tonic-gate * 97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 127c478bd9Sstevel@tonic-gate * and limitations under the License. 137c478bd9Sstevel@tonic-gate * 147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 197c478bd9Sstevel@tonic-gate * 207c478bd9Sstevel@tonic-gate * CDDL HEADER END 217c478bd9Sstevel@tonic-gate */ 227c478bd9Sstevel@tonic-gate /* 237c478bd9Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate /* 287c478bd9Sstevel@tonic-gate * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 
297c478bd9Sstevel@tonic-gate * All Rights Reserved 307c478bd9Sstevel@tonic-gate */ 317c478bd9Sstevel@tonic-gate 327c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 337c478bd9Sstevel@tonic-gate 347c478bd9Sstevel@tonic-gate #include <sys/param.h> 357c478bd9Sstevel@tonic-gate #include <sys/types.h> 367c478bd9Sstevel@tonic-gate #include <sys/systm.h> 377c478bd9Sstevel@tonic-gate #include <sys/cred.h> 387c478bd9Sstevel@tonic-gate #include <sys/proc.h> 397c478bd9Sstevel@tonic-gate #include <sys/user.h> 407c478bd9Sstevel@tonic-gate #include <sys/time.h> 417c478bd9Sstevel@tonic-gate #include <sys/buf.h> 427c478bd9Sstevel@tonic-gate #include <sys/vfs.h> 437c478bd9Sstevel@tonic-gate #include <sys/vnode.h> 447c478bd9Sstevel@tonic-gate #include <sys/socket.h> 457c478bd9Sstevel@tonic-gate #include <sys/uio.h> 467c478bd9Sstevel@tonic-gate #include <sys/tiuser.h> 477c478bd9Sstevel@tonic-gate #include <sys/swap.h> 487c478bd9Sstevel@tonic-gate #include <sys/errno.h> 497c478bd9Sstevel@tonic-gate #include <sys/debug.h> 507c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 517c478bd9Sstevel@tonic-gate #include <sys/kstat.h> 527c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 537c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 547c478bd9Sstevel@tonic-gate #include <sys/session.h> 557c478bd9Sstevel@tonic-gate #include <sys/dnlc.h> 567c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 577c478bd9Sstevel@tonic-gate #include <sys/acl.h> 587c478bd9Sstevel@tonic-gate #include <sys/ddi.h> 597c478bd9Sstevel@tonic-gate #include <sys/pathname.h> 607c478bd9Sstevel@tonic-gate #include <sys/flock.h> 617c478bd9Sstevel@tonic-gate #include <sys/dirent.h> 627c478bd9Sstevel@tonic-gate #include <sys/flock.h> 637c478bd9Sstevel@tonic-gate #include <sys/callb.h> 647c478bd9Sstevel@tonic-gate 657c478bd9Sstevel@tonic-gate #include <rpc/types.h> 667c478bd9Sstevel@tonic-gate #include <rpc/xdr.h> 677c478bd9Sstevel@tonic-gate #include <rpc/auth.h> 687c478bd9Sstevel@tonic-gate #include <rpc/rpcsec_gss.h> 
697c478bd9Sstevel@tonic-gate #include <rpc/clnt.h> 707c478bd9Sstevel@tonic-gate 717c478bd9Sstevel@tonic-gate #include <nfs/nfs.h> 727c478bd9Sstevel@tonic-gate #include <nfs/nfs_clnt.h> 737c478bd9Sstevel@tonic-gate #include <nfs/nfs_acl.h> 747c478bd9Sstevel@tonic-gate 757c478bd9Sstevel@tonic-gate #include <nfs/nfs4.h> 767c478bd9Sstevel@tonic-gate #include <nfs/rnode4.h> 777c478bd9Sstevel@tonic-gate #include <nfs/nfs4_clnt.h> 787c478bd9Sstevel@tonic-gate 797c478bd9Sstevel@tonic-gate /* 807c478bd9Sstevel@tonic-gate * The hash queues for the access to active and cached rnodes 817c478bd9Sstevel@tonic-gate * are organized as doubly linked lists. A reader/writer lock 827c478bd9Sstevel@tonic-gate * for each hash bucket is used to control access and to synchronize 837c478bd9Sstevel@tonic-gate * lookups, additions, and deletions from the hash queue. 847c478bd9Sstevel@tonic-gate * 857c478bd9Sstevel@tonic-gate * The rnode freelist is organized as a doubly linked list with 867c478bd9Sstevel@tonic-gate * a head pointer. Additions and deletions are synchronized via 877c478bd9Sstevel@tonic-gate * a single mutex. 887c478bd9Sstevel@tonic-gate * 897c478bd9Sstevel@tonic-gate * In order to add an rnode to the free list, it must be hashed into 907c478bd9Sstevel@tonic-gate * a hash queue and the exclusive lock to the hash queue be held. 917c478bd9Sstevel@tonic-gate * If an rnode is not hashed into a hash queue, then it is destroyed 927c478bd9Sstevel@tonic-gate * because it represents no valuable information that can be reused 937c478bd9Sstevel@tonic-gate * about the file. The exclusive lock to the hash queue must be 947c478bd9Sstevel@tonic-gate * held in order to prevent a lookup in the hash queue from finding 957c478bd9Sstevel@tonic-gate * the rnode and using it and assuming that the rnode is not on the 967c478bd9Sstevel@tonic-gate * freelist. The lookup in the hash queue will have the hash queue 977c478bd9Sstevel@tonic-gate * locked, either exclusive or shared. 
987c478bd9Sstevel@tonic-gate * 997c478bd9Sstevel@tonic-gate * The vnode reference count for each rnode is not allowed to drop 1007c478bd9Sstevel@tonic-gate * below 1. This prevents external entities, such as the VM 1017c478bd9Sstevel@tonic-gate * subsystem, from acquiring references to vnodes already on the 1027c478bd9Sstevel@tonic-gate * freelist and then trying to place them back on the freelist 1037c478bd9Sstevel@tonic-gate * when their reference is released. This means that the when an 1047c478bd9Sstevel@tonic-gate * rnode is looked up in the hash queues, then either the rnode 1057c478bd9Sstevel@tonic-gate * is removed from the freelist and that reference is tranfered to 1067c478bd9Sstevel@tonic-gate * the new reference or the vnode reference count must be incremented 1077c478bd9Sstevel@tonic-gate * accordingly. The mutex for the freelist must be held in order to 1087c478bd9Sstevel@tonic-gate * accurately test to see if the rnode is on the freelist or not. 1097c478bd9Sstevel@tonic-gate * The hash queue lock might be held shared and it is possible that 1107c478bd9Sstevel@tonic-gate * two different threads may race to remove the rnode from the 1117c478bd9Sstevel@tonic-gate * freelist. This race can be resolved by holding the mutex for the 1127c478bd9Sstevel@tonic-gate * freelist. Please note that the mutex for the freelist does not 1137c478bd9Sstevel@tonic-gate * need to be held if the rnode is not on the freelist. It can not be 1147c478bd9Sstevel@tonic-gate * placed on the freelist due to the requirement that the thread 1157c478bd9Sstevel@tonic-gate * putting the rnode on the freelist must hold the exclusive lock 1167c478bd9Sstevel@tonic-gate * to the hash queue and the thread doing the lookup in the hash 1177c478bd9Sstevel@tonic-gate * queue is holding either a shared or exclusive lock to the hash 1187c478bd9Sstevel@tonic-gate * queue. 
 *
 * The lock ordering is:
 *
 *	hash bucket lock -> vnode lock
 *	hash bucket lock -> freelist lock -> r_statelock
 */

/* The rnode hash table; one r4hashq_t (bucket + rwlock) per bucket. */
r4hashq_t *rtable4;

/* Protects the rnode freelist (rp4freelist) below. */
static kmutex_t rp4freelist_lock;
/* Doubly-linked list of inactive-but-cached rnodes; NULL when empty. */
static rnode4_t *rp4freelist = NULL;
/* Count of rnodes allocated from the kmem cache (see make_rnode4()). */
static long rnode4_new = 0;
/* Number of buckets in rtable4; rtable4mask = rtable4size - 1. */
int rtable4size;
static int rtable4mask;
/* kmem cache from which rnode4_t structures are allocated. */
static struct kmem_cache *rnode4_cache;
/* Target average hash chain length, used when sizing the table. */
static int rnode4_hashlen = 4;

static void r4inactive(rnode4_t *, cred_t *);
static vnode_t *make_rnode4(nfs4_sharedfh_t *, r4hashq_t *, struct vfs *,
    struct vnodeops *,
    int (*)(vnode_t *, page_t *, u_offset_t *, size_t *, int,
    cred_t *),
    int *, cred_t *);
static void rp4_rmfree(rnode4_t *);
int nfs4_free_data_reclaim(rnode4_t *);
static int nfs4_active_data_reclaim(rnode4_t *);
static int nfs4_free_reclaim(void);
static int nfs4_active_reclaim(void);
static int nfs4_rnode_reclaim(void);
static void nfs4_reclaim(void *);
static int isrootfh(nfs4_sharedfh_t *, rnode4_t *);
static void uninit_rnode4(rnode4_t *);
static void destroy_rnode4(rnode4_t *);

#ifdef DEBUG
static int r4_check_for_dups = 0; /* Flag to enable dup rnode detection. */
static int nfs4_rnode_debug = 0;
/* if nonzero, kmem_cache_free() rnodes rather than place on freelist */
static int nfs4_rnode_nofreelist = 0;
/* give messages on colliding shared filehandles */
static void r4_dup_check(rnode4_t *, vfs_t *);
#endif

/*
 * Free the resources associated with an rnode: wait for async i/o to
 * drain, flush/invalidate pages, then release the cached symlink
 * contents, access cache, readdir cache, cached ACL, and the hold on
 * the cached xattr directory vnode.
 *
 * Called with no locks held; takes and drops r_statelock internally
 * (see the file-header lock ordering: freelist lock -> r_statelock).
 */
static void
r4inactive(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	char *contents;
	int size;
	vsecattr_t *vsp;
	vnode_t *xattr;
	int error;

	/*
	 * Before freeing anything, wait until all asynchronous
	 * activity is done on this rnode.  This will allow all
	 * asynchronous read ahead and write behind i/o's to
	 * finish.
	 */
	mutex_enter(&rp->r_statelock);
	while (rp->r_count > 0)
		cv_wait(&rp->r_cv, &rp->r_statelock);
	mutex_exit(&rp->r_statelock);

	/*
	 * Flush and invalidate all pages associated with the vnode.
	 */
	vp = RTOV4(rp);
	if (nfs4_has_pages(vp)) {
		ASSERT(vp->v_type != VCHR);
		if ((rp->r_flags & R4DIRTY) && !rp->r_error) {
			error = VOP_PUTPAGE(vp, (u_offset_t)0, 0, 0, cr);
			if (error && (error == ENOSPC || error == EDQUOT)) {
				/*
				 * Latch the first out-of-space style error
				 * in r_error so it can be reported later.
				 */
				mutex_enter(&rp->r_statelock);
				if (!rp->r_error)
					rp->r_error = error;
				mutex_exit(&rp->r_statelock);
			}
		}
		nfs4_invalidate_pages(vp, (u_offset_t)0, cr);
	}

	/*
	 * Detach any held caches from the rnode while holding
	 * r_statelock; the actual frees happen below, after the
	 * lock has been dropped.
	 */
	mutex_enter(&rp->r_statelock);
	contents = rp->r_symlink.contents;
	size = rp->r_symlink.size;
	rp->r_symlink.contents = NULL;
	vsp = rp->r_secattr;
	rp->r_secattr = NULL;
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;
	mutex_exit(&rp->r_statelock);

	/*
	 * Free the access cache entries.
	 */
	(void) nfs4_access_purge_rp(rp);

	/*
	 * Free the readdir cache entries.
	 */
	nfs4_purge_rddir_cache(vp);

	/*
	 * Free the symbolic link cache.
	 */
	if (contents != NULL) {
		kmem_free((void *)contents, size);
	}

	/*
	 * Free any cached ACL.
	 */
	if (vsp != NULL)
		nfs4_acl_free_cache(vsp);

	/*
	 * Release the hold on the cached xattr directory vnode.
	 */
	if (xattr != NULL)
		VN_RELE(xattr);
}

/*
 * We have seen a case that the fh passed in is for "." which
 * should be a VROOT node, however, the fh is different from the
 * root fh stored in the mntinfo4_t. The invalid fh might be
 * from a misbehaved server and will panic the client system at
 * a later time. To avoid the panic, we drop the bad fh, use
 * the root fh from mntinfo4_t, and print an error message
 * for attention.
 *
 * Returns the filehandle to use (either the fh that was passed in, or
 * mi->mi_rootfh if the passed-in fh was bad); *wasbad is set to 1 in
 * the latter case, 0 otherwise.  The caller remains responsible for
 * releasing the fh it passed in.
 */
nfs4_sharedfh_t *
badrootfh_check(nfs4_sharedfh_t *fh, nfs4_fname_t *nm, mntinfo4_t *mi,
    int *wasbad)
{
	char *s;

	*wasbad = 0;
	s = fn_name(nm);
	ASSERT(strcmp(s, "..") != 0);

	/*
	 * Only "." can legitimately share the root filehandle; if the
	 * name is "." but the fh differs from mi_rootfh, the fh is bad.
	 */
	if ((s[0] == '.' && s[1] == '\0') && fh &&
	    !SFH4_SAME(mi->mi_rootfh, fh)) {
#ifdef DEBUG
		nfs4_fhandle_t fhandle;

		zcmn_err(mi->mi_zone->zone_id, CE_WARN,
		    "Server %s returns a different "
		    "root filehandle for the path %s:",
		    mi->mi_curr_serv->sv_hostname,
		    mi->mi_curr_serv->sv_path);

		/* print the bad fh */
		fhandle.fh_len = fh->sfh_fh.nfs_fh4_len;
		bcopy(fh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);

		/* print mi_rootfh */
		fhandle.fh_len = mi->mi_rootfh->sfh_fh.nfs_fh4_len;
		bcopy(mi->mi_rootfh->sfh_fh.nfs_fh4_val, fhandle.fh_buf,
		    fhandle.fh_len);
		nfs4_printfhandle(&fhandle);
#endif
		/* use mi_rootfh instead; fh will be rele by the caller */
		fh = mi->mi_rootfh;
		*wasbad = 1;
	}

	/* fn_name() returns a MAXNAMELEN-sized buffer. */
	kmem_free(s, MAXNAMELEN);
	return (fh);
}

/*
 * Update the attribute cache for vp from garp (if non-NULL), handling
 * the new-node vs. existing-node cases.
 *
 * Lock contract: the caller holds rtable4[index].r_lock; this function
 * ALWAYS drops it before returning (on every path), releasing it before
 * calling nfs4_attr_cache() for an existing node, or after the new-node
 * initialization is complete.
 */
void
r4_do_attrcache(vnode_t *vp, nfs4_ga_res_t *garp, int newnode,
    hrtime_t t, cred_t *cr, int index)
{
	vattr_t *attr;
	/*
	 * Don't add to attrcache if time overflow, but
	 * no need to check because either attr is null or the time
	 * values in it were processed by nfs4_time_ntov(), which checks
	 * for time overflows.
	 */
	attr = garp ? &garp->n4g_va : NULL;

	if (attr) {
		if (!newnode) {
			/* Existing node: drop the bucket lock first. */
			rw_exit(&rtable4[index].r_lock);
#ifdef DEBUG
			if (vp->v_type != attr->va_type &&
			    vp->v_type != VNON && attr->va_type != VNON) {
				zcmn_err(VTOMI4(vp)->mi_zone->zone_id, CE_WARN,
				    "makenfs4node: type (%d) doesn't "
				    "match type of found node at %p (%d)",
				    attr->va_type, (void *)vp, vp->v_type);
			}
#endif
			nfs4_attr_cache(vp, garp, t, cr, TRUE, NULL);
		} else {
			rnode4_t *rp = VTOR4(vp);

			vp->v_type = attr->va_type;
			vp->v_rdev = attr->va_rdev;

			/*
			 * Turn this object into a "stub" object if we
			 * crossed an underlying server fs boundary.  To
			 * make this check, during mount we save the
			 * fsid of the server object being mounted.
			 * Here we compare this object's server fsid
			 * with the fsid we saved at mount.  If they
			 * are different, we crossed server fs boundary.
			 *
			 * The stub flag is set (or not) at rnode
			 * creation time and it never changes for life
			 * of rnode.
			 *
			 * We don't bother with taking r_statelock
			 * to set R4SRVSTUB flag because this is a new
			 * rnode and we're holding rtable lock.  No other
			 * thread could have obtained access to this
			 * rnode.
			 */
			if (garp->n4g_fsid_valid) {
				rp->r_srv_fsid = garp->n4g_fsid;

				if (vp->v_type == VDIR) {
					servinfo4_t *svp = rp->r_server;

					(void) nfs_rw_enter_sig(&svp->sv_lock,
					    RW_READER, 0);
					if (!FATTR4_FSID_EQ(&garp->n4g_fsid,
					    &svp->sv_fsid)) {
						rp->r_flags |= R4SRVSTUB;
					}
					nfs_rw_exit(&svp->sv_lock);
				}
			}

			/* Can not cache partial attr */
			if (attr->va_mask == AT_ALL)
				nfs4_attrcache_noinval(vp, garp, t);
			else
				PURGE_ATTRCACHE4(vp);

			rw_exit(&rtable4[index].r_lock);
		}
	} else {
		/* No attributes available: purge if new, then unlock. */
		if (newnode) {
			PURGE_ATTRCACHE4(vp);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Find or create an rnode based primarily on filehandle.  To be
 * used when dvp (vnode for parent directory) is not available;
 * otherwise, makenfs4node() should be used.
 *
 * The nfs4_fname_t argument *npp is consumed and nulled out.
 *
 * Returns a held vnode for sfh; psfh (the parent's filehandle, may be
 * NULL) gets an additional hold if a new shadow vnode is created.
 */

vnode_t *
makenfs4node_by_fh(nfs4_sharedfh_t *sfh, nfs4_sharedfh_t *psfh,
    nfs4_fname_t **npp, nfs4_ga_res_t *garp,
    mntinfo4_t *mi, cred_t *cr, hrtime_t t)
{
	vfs_t *vfsp = mi->mi_vfsp;
	int newnode = 0;
	vnode_t *vp;
	rnode4_t *rp;
	svnode_t *svp;
	nfs4_fname_t *name;
	int index;

	ASSERT(npp && *npp);
	name = *npp;
	*npp = NULL;	/* the name reference is consumed here */

	index = rtable4hash(sfh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	/* Fast path: the rnode already exists in the hash bucket. */
	rp = r4find(&rtable4[index], sfh, vfsp);
	if (rp != NULL) {
		rw_exit(&rtable4[index].r_lock);
		vp = RTOV4(rp);
		fn_rele(&name);
		return (vp);
	}

	vp = make_rnode4(sfh, &rtable4[index], vfsp,
	    nfs4_vnodeops, nfs4_putapage, &newnode, cr);
	if (newnode) {
		/* New node: initialize its shadow-vnode list and name. */
		svp = vtosv(vp);
		svp->sv_forw = svp->sv_back = svp;
		svp->sv_name = name;
		if (psfh != NULL)
			sfh4_hold(psfh);
		svp->sv_dfh = psfh;
	} else {
		fn_rele(&name);
	}

	/* r4_do_attrcache() drops the bucket lock for us. */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}

/*
 * Find or create a vnode for the given filehandle, filesystem, parent, and
 * name.  The reference to nm is consumed, so the caller must first do an
 * fn_hold() if it wants to continue using nm after this call.
 */
vnode_t *
makenfs4node(nfs4_sharedfh_t *fh, nfs4_ga_res_t *garp, struct vfs *vfsp,
    hrtime_t t, cred_t *cr, vnode_t *dvp, nfs4_fname_t *nm)
{
	vnode_t *vp;
	int newnode;
	int index;
	mntinfo4_t *mi = VFTOMI4(vfsp);
	int had_badfh = 0;
	rnode4_t *rp;

	ASSERT(dvp != NULL);

	/* Substitute mi_rootfh if the server handed back a bogus "." fh. */
	fh = badrootfh_check(fh, nm, mi, &had_badfh);

	index = rtable4hash(fh);
	rw_enter(&rtable4[index].r_lock, RW_READER);

	/*
	 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
	 */
	vp = make_rnode4(fh, &rtable4[index], vfsp, nfs4_vnodeops,
	    nfs4_putapage, &newnode, cr);

	rp = VTOR4(vp);
	sv_activate(&vp, dvp, &nm, newnode);
	if (dvp->v_flag & V_XATTRDIR) {
		/* Mark children of an xattr directory as xattrs. */
		mutex_enter(&rp->r_statelock);
		rp->r_flags |= R4ISXATTR;
		mutex_exit(&rp->r_statelock);
	}

	/* if getting a bad file handle, do not cache the attributes. */
	if (had_badfh) {
		rw_exit(&rtable4[index].r_lock);
		return (vp);
	}

	/* r4_do_attrcache() drops the bucket lock for us. */
	ASSERT(RW_LOCK_HELD(&rtable4[index].r_lock));
	r4_do_attrcache(vp, garp, newnode, t, cr, index);
	ASSERT(rw_owner(&rtable4[index].r_lock) != curthread);

	return (vp);
}

/*
 * Hash on address of filehandle object.
 * XXX totally untuned.
 */

int
rtable4hash(nfs4_sharedfh_t *fh)
{
	return (((uintptr_t)fh / sizeof (*fh)) & rtable4mask);
}

/*
 * Find or create the vnode for the given filehandle and filesystem.
 * *newnode is set to zero if the vnode already existed; non-zero if it had
 * to be created.
 *
 * Entered with rhtp->r_lock held as reader.  On return the lock is held
 * (possibly upgraded to writer if a new node was hashed in); the caller
 * is responsible for dropping it.
 *
 * Note: make_rnode4() may upgrade the hash bucket lock to exclusive.
 */

static vnode_t *
make_rnode4(nfs4_sharedfh_t *fh, r4hashq_t *rhtp, struct vfs *vfsp,
    struct vnodeops *vops,
    int (*putapage)(vnode_t *, page_t *, u_offset_t *, size_t *, int, cred_t *),
    int *newnode, cred_t *cr)
{
	rnode4_t *rp;
	rnode4_t *trp;
	vnode_t *vp;
	mntinfo4_t *mi;

	ASSERT(RW_READ_HELD(&rhtp->r_lock));

	mi = VFTOMI4(vfsp);

start:
	if ((rp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(rp);
		*newnode = 0;
		return (vp);
	}
	rw_exit(&rhtp->r_lock);

	mutex_enter(&rp4freelist_lock);

	/*
	 * If the freelist is non-empty and we're already at the rnode
	 * allocation limit (nrnode), recycle the head of the freelist
	 * instead of allocating a fresh rnode.
	 */
	if (rp4freelist != NULL && rnode4_new >= nrnode) {
		rp = rp4freelist;
		rp4_rmfree(rp);
		mutex_exit(&rp4freelist_lock);

		vp = RTOV4(rp);

		if (rp->r_flags & R4HASHED) {
			/*
			 * Unhash the victim; lock order is hash bucket
			 * lock -> vnode lock.  If someone grabbed a
			 * reference meanwhile (v_count > 1), give it up
			 * and retry the lookup from the top.
			 */
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				rw_enter(&rhtp->r_lock, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		r4inactive(rp, cr);

		/*
		 * r4inactive() may have blocked; re-check that we still
		 * hold the only reference before recycling.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&rhtp->r_lock, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);

		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		uninit_rnode4(rp);

		/*
		 * Make sure that if rnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/* Allocate a fresh rnode and vnode pair. */
		vnode_t *new_vp;

		mutex_exit(&rp4freelist_lock);

		rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&rnode4_new, 1);
#ifdef DEBUG
		clstat4_debug.nrnode.value.ui64++;
#endif
		vp = new_vp;
	}

	/* Initialize the (recycled or fresh) rnode from scratch. */
	bzero(rp, sizeof (*rp));
	rp->r_vnode = vp;
	nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
	nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
	rp->created_v4 = 0;
	list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
	    offsetof(nfs4_open_stream_t, os_node));
	/* Empty circular lock-owner list points back at its own head. */
	rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
	rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
	cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
	rp->r_flags = R4READDIRWATTR;
	rp->r_fh = fh;
	rp->r_hashq = rhtp;
	sfh4_hold(rp->r_fh);
	rp->r_server = mi->mi_curr_serv;
	rp->r_deleg_type = OPEN_DELEGATE_NONE;
	rp->r_deleg_needs_recovery = OPEN_DELEGATE_NONE;
	nfs_rw_init(&rp->r_deleg_recall_lock, NULL, RW_DEFAULT, NULL);

	rddir4_cache_create(rp);
	rp->r_putapage = putapage;
	vn_setops(vp, vops);
	vp->v_data = (caddr_t)rp;
	vp->v_vfsp = vfsp;
	VFS_HOLD(vfsp);
	vp->v_type = VNON;
	if (isrootfh(fh, rp))
		vp->v_flag = VROOT;
	vn_exists(vp);

	/*
	 * There is a race condition if someone else
	 * alloc's the rnode while no locks are held, so we
	 * check again and recover if found.
 */
	rw_enter(&rhtp->r_lock, RW_WRITER);
	if ((trp = r4find(rhtp, fh, vfsp)) != NULL) {
		vp = RTOV4(trp);
		*newnode = 0;
		rw_exit(&rhtp->r_lock);
		rp4_addfree(rp, cr);
		rw_enter(&rhtp->r_lock, RW_READER);
		return (vp);
	}
	rp4_addhash(rp);
	*newnode = 1;
	return (vp);
}

/*
 * Tear down all of the locks, condition variables, caches and lists
 * attached to an rnode that is about to be recycled (bzero'ed and
 * reinitialized) or destroyed outright.
 *
 * The caller must hold the only remaining reference to the vnode, the
 * rnode must already be off the hash queues and the freelist, and any
 * delegation must already have been returned.
 */
static void
uninit_rnode4(rnode4_t *rp)
{
	vnode_t *vp = RTOV4(rp);

	ASSERT(rp != NULL);
	ASSERT(vp != NULL);
	ASSERT(vp->v_count == 1);
	ASSERT(rp->r_count == 0);
	ASSERT(rp->r_mapcnt == 0);
	/*
	 * R4LODANGLERS indicates there may still be lock owners hanging
	 * off r_lo_head; flush them before asserting the list is empty.
	 */
	if (rp->r_flags & R4LODANGLERS) {
		nfs4_flush_lock_owners(rp);
	}
	ASSERT(rp->r_lo_head.lo_next_rnode == &rp->r_lo_head);
	ASSERT(rp->r_lo_head.lo_prev_rnode == &rp->r_lo_head);
	ASSERT(!(rp->r_flags & R4HASHED));
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
	nfs4_clear_open_streams(rp);
	list_destroy(&rp->r_open_streams);

	/*
	 * Destroy the rddir cache first since we need to grab the r_statelock.
	 */
	mutex_enter(&rp->r_statelock);
	rddir4_cache_destroy(rp);
	mutex_exit(&rp->r_statelock);
	sv_uninit(&rp->r_svnode);
	sfh4_rele(&rp->r_fh);
	nfs_rw_destroy(&rp->r_rwlock);
	nfs_rw_destroy(&rp->r_lkserlock);
	mutex_destroy(&rp->r_statelock);
	mutex_destroy(&rp->r_statev4_lock);
	mutex_destroy(&rp->r_os_lock);
	cv_destroy(&rp->r_cv);
	cv_destroy(&rp->r_commit.c_cv);
	nfs_rw_destroy(&rp->r_deleg_recall_lock);
	/* The delmap list is only created on demand (R4DELMAPLIST set). */
	if (rp->r_flags & R4DELMAPLIST)
		list_destroy(&rp->r_indelmap);
}

/*
 * Put an rnode on the free list.
 *
 * Rnodes which were allocated above and beyond the normal limit
 * are immediately freed.
 */
void
rp4_addfree(rnode4_t *rp, cred_t *cr)
{
	vnode_t *vp;
	vnode_t *xattr;
	struct vfs *vfsp;

	vp = RTOV4(rp);
	ASSERT(vp->v_count >= 1);
	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);

	/*
	 * If we have too many rnodes allocated and there are no
	 * references to this rnode, or if the rnode is no longer
	 * accessible because it does not reside in the hash queues,
	 * or if an i/o error occurred while writing to the file,
	 * then just free it instead of putting it on the rnode
	 * freelist.
	 */
	vfsp = vp->v_vfsp;
	if (((rnode4_new > nrnode || !(rp->r_flags & R4HASHED) ||
#ifdef DEBUG
	    (nfs4_rnode_nofreelist != 0) ||
#endif
	    rp->r_error || (rp->r_flags & R4RECOVERR) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED)) && rp->r_count == 0)) {
		if (rp->r_flags & R4HASHED) {
			rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Another reference appeared; just drop
				 * ours and let the other holder dispose
				 * of the rnode later.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&rp->r_hashq->r_lock);
				return;
			}
			mutex_exit(&vp->v_lock);
			rp4_rmhash_locked(rp);
			rw_exit(&rp->r_hashq->r_lock);
		}

		/*
		 * Make sure we don't have a delegation on this rnode
		 * before destroying it.
		 */
		if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
			(void) nfs4delegreturn(rp,
			    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		}

		r4inactive(rp, cr);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * rnode is not in the rnode hash queues; one
		 * way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the rnode was marked
		 * with R4DIRTY or for a modified page.  This
		 * reference may have been acquired before our call
		 * to r4inactive.  The i/o may have been completed,
		 * thus allowing r4inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the rnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the rnode or placing it on the
		 * rnode freelist.  If there are no other references,
		 * then the rnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		destroy_rnode4(rp);
		return;
	}

	/*
	 * Lock the hash queue and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the rnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this rnode to the freelist.
	 */
again:
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&rp->r_hashq->r_lock);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Make sure we don't put an rnode with a delegation
	 * on the free list.
	 */
	if (rp->r_deleg_type != OPEN_DELEGATE_NONE) {
		/*
		 * nfs4delegreturn() goes over the wire, so the hash
		 * lock must be dropped first; restart the checks from
		 * the top since the world may have changed meanwhile.
		 */
		rw_exit(&rp->r_hashq->r_lock);
		(void) nfs4delegreturn(rp,
		    NFS4_DR_FORCE|NFS4_DR_PUSH|NFS4_DR_REOPEN);
		goto again;
	}

	/*
	 * Now that we have the hash queue lock, and we know there
	 * are not anymore references on the vnode, check to make
	 * sure there aren't any open streams still on the rnode.
	 * If so, drop the hash queue lock, remove the open streams,
	 * and recheck the v_count.
	 */
	mutex_enter(&rp->r_os_lock);
	if (list_head(&rp->r_open_streams) != NULL) {
		mutex_exit(&rp->r_os_lock);
		rw_exit(&rp->r_hashq->r_lock);
		if (nfs_zone() != VTOMI4(vp)->mi_zone)
			nfs4_clear_open_streams(rp);
		else
			(void) nfs4close_all(vp, cr);
		goto again;
	}
	mutex_exit(&rp->r_os_lock);

	/*
	 * Before we put it on the freelist, make sure there is no
	 * active xattr directory cached, the freelist will not
	 * have its entries r4inactive'd if there is still an active
	 * rnode, thus nothing in the freelist can hold another
	 * rnode active.
	 */
	xattr = rp->r_xattr_dir;
	rp->r_xattr_dir = NULL;

	/*
	 * If there is no cached data or metadata for this file, then
	 * put the rnode on the front of the freelist so that it will
	 * be reused before other rnodes which may have cached data or
	 * metadata associated with them.
	 */
	mutex_enter(&rp4freelist_lock);
	if (rp4freelist == NULL) {
		rp->r_freef = rp;
		rp->r_freeb = rp;
		rp4freelist = rp;
	} else {
		/* Insert at the tail of the circular freelist. */
		rp->r_freef = rp4freelist;
		rp->r_freeb = rp4freelist->r_freeb;
		rp4freelist->r_freeb->r_freef = rp;
		rp4freelist->r_freeb = rp;
		if (!nfs4_has_pages(vp) && rp->r_dir == NULL &&
		    rp->r_symlink.contents == NULL &&
		    rp->r_secattr == NULL)
			rp4freelist = rp;
	}
	mutex_exit(&rp4freelist_lock);

	rw_exit(&rp->r_hashq->r_lock);

	/* Release the cached xattr dir after all locks are dropped. */
	if (xattr)
		VN_RELE(xattr);
}

/*
 * Remove an rnode from the free list.
 *
 * The caller must be holding rp4freelist_lock and the rnode
 * must be on the freelist.
 */
static void
rp4_rmfree(rnode4_t *rp)
{

	ASSERT(MUTEX_HELD(&rp4freelist_lock));
	ASSERT(rp->r_freef != NULL && rp->r_freeb != NULL);

	/*
	 * If rp is the list head, advance the head; if rp was the
	 * only entry on the circular list, the list becomes empty.
	 */
	if (rp == rp4freelist) {
		rp4freelist = rp->r_freef;
		if (rp == rp4freelist)
			rp4freelist = NULL;
	}
	/* Unlink rp from the doubly-linked circular freelist. */
	rp->r_freeb->r_freef = rp->r_freef;
	rp->r_freef->r_freeb = rp->r_freeb;

	/* NULL free pointers mark the rnode as off the freelist. */
	rp->r_freef = rp->r_freeb = NULL;
}

/*
 * Put a rnode in the hash table.
 *
 * The caller must be holding the exclusive hash queue lock
 */
void
rp4_addhash(rnode4_t *rp)
{
	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
	ASSERT(!(rp->r_flags & R4HASHED));

#ifdef DEBUG
	r4_dup_check(rp, RTOV4(rp)->v_vfsp);
#endif

	/* Insert at the head of the bucket's doubly-linked chain. */
	rp->r_hashf = rp->r_hashq->r_hashf;
	rp->r_hashq->r_hashf = rp;
	rp->r_hashb = (rnode4_t *)rp->r_hashq;
	rp->r_hashf->r_hashb = rp;

	/* r_flags is protected by r_statelock, not the hash lock. */
	mutex_enter(&rp->r_statelock);
	rp->r_flags |= R4HASHED;
	mutex_exit(&rp->r_statelock);
}

/*
 * Remove a rnode from the hash table.
 *
 * The caller must be holding the hash queue lock.
 */
void
rp4_rmhash_locked(rnode4_t *rp)
{
	ASSERT(RW_WRITE_HELD(&rp->r_hashq->r_lock));
	ASSERT(rp->r_flags & R4HASHED);

	/* Unlink from the bucket's doubly-linked chain. */
	rp->r_hashb->r_hashf = rp->r_hashf;
	rp->r_hashf->r_hashb = rp->r_hashb;

	/* r_flags is protected by r_statelock, not the hash lock. */
	mutex_enter(&rp->r_statelock);
	rp->r_flags &= ~R4HASHED;
	mutex_exit(&rp->r_statelock);
}

/*
 * Remove a rnode from the hash table.
 *
 * The caller must not be holding the hash queue lock.
 */
void
rp4_rmhash(rnode4_t *rp)
{

	/* Acquire the bucket lock and defer to the locked variant. */
	rw_enter(&rp->r_hashq->r_lock, RW_WRITER);
	rp4_rmhash_locked(rp);
	rw_exit(&rp->r_hashq->r_lock);
}

/*
 * Lookup a rnode by fhandle. Ignores rnodes that had failed recovery.
 * Returns NULL if no match. If an rnode is returned, the reference count
 * on the master vnode is incremented.
 *
 * The caller must be holding the hash queue lock, either shared or exclusive.
 */
rnode4_t *
r4find(r4hashq_t *rhtp, nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&rhtp->r_lock));

	for (rp = rhtp->r_hashf; rp != (rnode4_t *)rhtp; rp = rp->r_hashf) {
		vp = RTOV4(rp);
		/* Match on both the vfs and the shared filehandle. */
		if (vp->v_vfsp == vfsp && SFH4_SAME(rp->r_fh, fh)) {

			/* Skip rnodes for which recovery failed. */
			mutex_enter(&rp->r_statelock);
			if (rp->r_flags & R4RECOVERR) {
				mutex_exit(&rp->r_statelock);
				continue;
			}
			mutex_exit(&rp->r_statelock);
#ifdef DEBUG
			r4_dup_check(rp, vfsp);
#endif
			if (rp->r_freef != NULL) {
				mutex_enter(&rp4freelist_lock);
				/*
				 * If the rnode is on the freelist,
				 * then remove it and use that reference
				 * as the new reference.  Otherwise,
				 * need to increment the reference count.
				 *
				 * r_freef is deliberately re-tested here:
				 * the first test was unlocked, and the
				 * rnode may have come off the freelist
				 * before rp4freelist_lock was acquired.
				 */
				if (rp->r_freef != NULL) {
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
				} else {
					mutex_exit(&rp4freelist_lock);
					VN_HOLD(vp);
				}
			} else
				VN_HOLD(vp);

			/*
			 * if root vnode, set v_flag to indicate that
			 */
			if (isrootfh(fh, rp)) {
				if (!(vp->v_flag & VROOT)) {
					mutex_enter(&vp->v_lock);
					vp->v_flag |= VROOT;
					mutex_exit(&vp->v_lock);
				}
			}
			return (rp);
		}
	}
	return (NULL);
}

/*
 * Lookup an rnode by fhandle. Just a wrapper for r4find()
 * that assumes the caller hasn't already got the lock
 * on the hash bucket.
 */
rnode4_t *
r4find_unlocked(nfs4_sharedfh_t *fh, struct vfs *vfsp)
{
	rnode4_t *rp;
	int index;

	/* Hash the filehandle to its bucket, then search under r_lock. */
	index = rtable4hash(fh);
	rw_enter(&rtable4[index].r_lock, RW_READER);
	rp = r4find(&rtable4[index], fh, vfsp);
	rw_exit(&rtable4[index].r_lock);

	return (rp);
}

/*
 * Return 1 if there is an active vnode belonging to this vfs in the
 * rtable4 cache.
 *
 * Several of these checks are done without holding the usual
 * locks. This is safe because destroy_rtable(), rp_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any rnodes.
 */
int
check_rtable4(struct vfs *vfsp)
{
	rnode4_t *rp;
	vnode_t *vp;
	char *busy = NULL;
	int index;

	/* Walk every hash bucket looking for a busy rnode of this vfs. */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);

		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				if (rp->r_freef == NULL) {
					busy = "not on free list";
				} else if (nfs4_has_pages(vp) &&
				    (rp->r_flags & R4DIRTY)) {
					busy = "dirty pages";
				} else if (rp->r_count > 0) {
					busy = "r_count > 0";
				}

				if (busy != NULL) {
#ifdef DEBUG
					char *path;

					path = fn_path(rp->r_svnode.sv_name);
					NFS4_DEBUG(nfs4_rnode_debug,
					    (CE_NOTE, "check_rtable4: " "%s %s",
					    path, busy));
					kmem_free(path, strlen(path)+1);
#endif
					rw_exit(&rtable4[index].r_lock);
					return (1);
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
	return (0);
}

/*
 * Destroy inactive vnodes from the hash queues which
 * belong to this vfs. All of the vnodes should be inactive.
 * It is essential that we destroy all rnodes in case of
 * forced unmount as well as in normal unmount case.
 */

void
destroy_rtable4(struct vfs *vfsp, cred_t *cr)
{
	int index;
	vnode_t *vp;
	rnode4_t *rp, *r_hashf, *rlist;

	rlist = NULL;

	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_WRITER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = r_hashf) {
			/* save the hash pointer before destroying */
			r_hashf = rp->r_hashf;

			vp = RTOV4(rp);
			if (vp->v_vfsp == vfsp) {
				mutex_enter(&rp4freelist_lock);
				if (rp->r_freef != NULL) {
					/*
					 * Pull the rnode off both the
					 * freelist and the hash queue,
					 * then thread it onto rlist by
					 * reusing its (now free) r_hashf
					 * pointer as the list link.
					 */
					rp4_rmfree(rp);
					mutex_exit(&rp4freelist_lock);
					rp4_rmhash_locked(rp);
					rp->r_hashf = rlist;
					rlist = rp;
				} else
					mutex_exit(&rp4freelist_lock);
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}

	/* Dispose of the collected rnodes after all bucket locks dropped. */
	for (rp = rlist; rp != NULL; rp = r_hashf) {
		r_hashf = rp->r_hashf;
		/*
		 * This call to rp4_addfree will end up destroying the
		 * rnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		rp4_addfree(rp, cr);
	}
}

/*
 * This routine destroys all the resources of an rnode
 * and finally the rnode itself.
 */
static void
destroy_rnode4(rnode4_t *rp)
{
	vnode_t *vp;
	vfs_t *vfsp;

	/* Delegation must have been returned by the caller. */
	ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);

	vp = RTOV4(rp);
	vfsp = vp->v_vfsp;

	uninit_rnode4(rp);
	atomic_add_long((ulong_t *)&rnode4_new, -1);
#ifdef DEBUG
	clstat4_debug.nrnode.value.ui64--;
#endif
	kmem_cache_free(rnode4_cache, rp);
	vn_invalid(vp);
	vn_free(vp);
	/* Drop the hold on the vfs taken when the rnode was created. */
	VFS_RELE(vfsp);
}

/*
 * Invalidate the attributes on all rnodes forcing the next getattr
 * to go over the wire.  Used to flush stale uid and gid mappings.
 * Maybe done on a per vfsp, or all rnodes (vfsp == NULL)
 */
void
nfs4_rnode_invalidate(struct vfs *vfsp)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp;

	/*
	 * Walk the hash queues looking for rnodes.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			if (vfsp != NULL && vp->v_vfsp != vfsp)
				continue;

			/*
			 * Best effort: skip any rnode whose statelock
			 * is currently held rather than blocking.
			 */
			if (!mutex_tryenter(&rp->r_statelock))
				continue;

			/*
			 * Expire the attributes by resetting the change
			 * and attr timeout.
			 */
			rp->r_change = 0;
			PURGE_ATTRCACHE4_LOCKED(rp);
			mutex_exit(&rp->r_statelock);
		}
		rw_exit(&rtable4[index].r_lock);
	}
}

/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
void
r4flush(struct vfs *vfsp, cred_t *cr)
{
	int index;
	rnode4_t *rp;
	vnode_t *vp, **vplist;
	long num, cnt;

	/*
	 * Check to see whether there is anything to do.
	 */
	num = rnode4_new;
	if (num == 0)
		return;

	/*
	 * Allocate a slot for all currently active rnodes on the
	 * supposition that they all may need flushing.
	 */
	vplist = kmem_alloc(num * sizeof (*vplist), KM_SLEEP);
	cnt = 0;

	/*
	 * Walk the hash queues looking for rnodes with page
	 * lists associated with them.  Make a list of these
	 * files.
	 */
	for (index = 0; index < rtable4size; index++) {
		rw_enter(&rtable4[index].r_lock, RW_READER);
		for (rp = rtable4[index].r_hashf;
		    rp != (rnode4_t *)(&rtable4[index]);
		    rp = rp->r_hashf) {
			vp = RTOV4(rp);
			/*
			 * Don't bother sync'ing a vp if it
			 * is part of virtual swap device or
			 * if VFS is read-only
			 */
			if (IS_SWAPVP(vp) || vn_is_readonly(vp))
				continue;
			/*
			 * If flushing all mounted file systems or
			 * the vnode belongs to this vfs, has pages
			 * and is marked as either dirty or mmap'd,
			 * hold and add this vnode to the list of
			 * vnodes to flush.
			 */
			if ((vfsp == NULL || vp->v_vfsp == vfsp) &&
			    nfs4_has_pages(vp) &&
			    ((rp->r_flags & R4DIRTY) || rp->r_mapcnt > 0)) {
				VN_HOLD(vp);
				vplist[cnt++] = vp;
				/*
				 * vplist has exactly num slots; stop
				 * scanning once it is full.
				 */
				if (cnt == num) {
					rw_exit(&rtable4[index].r_lock);
					goto toomany;
				}
			}
		}
		rw_exit(&rtable4[index].r_lock);
	}
toomany:

	/*
	 * Flush and release all of the files on the list.
	 */
	while (cnt-- > 0) {
		vp = vplist[cnt];
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_ASYNC, cr);
		VN_RELE(vp);
	}

	/*
	 * Free the space allocated to hold the list.
12767c478bd9Sstevel@tonic-gate */ 12777c478bd9Sstevel@tonic-gate kmem_free(vplist, num * sizeof (*vplist)); 12787c478bd9Sstevel@tonic-gate } 12797c478bd9Sstevel@tonic-gate 12807c478bd9Sstevel@tonic-gate int 12817c478bd9Sstevel@tonic-gate nfs4_free_data_reclaim(rnode4_t *rp) 12827c478bd9Sstevel@tonic-gate { 12837c478bd9Sstevel@tonic-gate char *contents; 12847c478bd9Sstevel@tonic-gate vnode_t *xattr; 12857c478bd9Sstevel@tonic-gate int size; 12867c478bd9Sstevel@tonic-gate vsecattr_t *vsp; 12877c478bd9Sstevel@tonic-gate int freed; 12887c478bd9Sstevel@tonic-gate bool_t rdc = FALSE; 12897c478bd9Sstevel@tonic-gate 12907c478bd9Sstevel@tonic-gate /* 12917c478bd9Sstevel@tonic-gate * Free any held caches which may 12927c478bd9Sstevel@tonic-gate * be associated with this rnode. 12937c478bd9Sstevel@tonic-gate */ 12947c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statelock); 12957c478bd9Sstevel@tonic-gate if (rp->r_dir != NULL) 12967c478bd9Sstevel@tonic-gate rdc = TRUE; 12977c478bd9Sstevel@tonic-gate contents = rp->r_symlink.contents; 12987c478bd9Sstevel@tonic-gate size = rp->r_symlink.size; 12997c478bd9Sstevel@tonic-gate rp->r_symlink.contents = NULL; 13007c478bd9Sstevel@tonic-gate vsp = rp->r_secattr; 13017c478bd9Sstevel@tonic-gate rp->r_secattr = NULL; 13027c478bd9Sstevel@tonic-gate xattr = rp->r_xattr_dir; 13037c478bd9Sstevel@tonic-gate rp->r_xattr_dir = NULL; 13047c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 13057c478bd9Sstevel@tonic-gate 13067c478bd9Sstevel@tonic-gate /* 13077c478bd9Sstevel@tonic-gate * Free the access cache entries. 
13087c478bd9Sstevel@tonic-gate */ 13097c478bd9Sstevel@tonic-gate freed = nfs4_access_purge_rp(rp); 13107c478bd9Sstevel@tonic-gate 13117c478bd9Sstevel@tonic-gate if (rdc == FALSE && contents == NULL && vsp == NULL && xattr == NULL) 13127c478bd9Sstevel@tonic-gate return (freed); 13137c478bd9Sstevel@tonic-gate 13147c478bd9Sstevel@tonic-gate /* 13157c478bd9Sstevel@tonic-gate * Free the readdir cache entries, incompletely if we can't block. 13167c478bd9Sstevel@tonic-gate */ 13177c478bd9Sstevel@tonic-gate nfs4_purge_rddir_cache(RTOV4(rp)); 13187c478bd9Sstevel@tonic-gate 13197c478bd9Sstevel@tonic-gate /* 13207c478bd9Sstevel@tonic-gate * Free the symbolic link cache. 13217c478bd9Sstevel@tonic-gate */ 13227c478bd9Sstevel@tonic-gate if (contents != NULL) { 13237c478bd9Sstevel@tonic-gate 13247c478bd9Sstevel@tonic-gate kmem_free((void *)contents, size); 13257c478bd9Sstevel@tonic-gate } 13267c478bd9Sstevel@tonic-gate 13277c478bd9Sstevel@tonic-gate /* 13287c478bd9Sstevel@tonic-gate * Free any cached ACL. 
13297c478bd9Sstevel@tonic-gate */ 13307c478bd9Sstevel@tonic-gate if (vsp != NULL) 13317c478bd9Sstevel@tonic-gate nfs4_acl_free_cache(vsp); 13327c478bd9Sstevel@tonic-gate 13337c478bd9Sstevel@tonic-gate /* 13347c478bd9Sstevel@tonic-gate * Release the xattr directory vnode 13357c478bd9Sstevel@tonic-gate */ 13367c478bd9Sstevel@tonic-gate if (xattr != NULL) 13377c478bd9Sstevel@tonic-gate VN_RELE(xattr); 13387c478bd9Sstevel@tonic-gate 13397c478bd9Sstevel@tonic-gate return (1); 13407c478bd9Sstevel@tonic-gate } 13417c478bd9Sstevel@tonic-gate 13427c478bd9Sstevel@tonic-gate static int 13437c478bd9Sstevel@tonic-gate nfs4_active_data_reclaim(rnode4_t *rp) 13447c478bd9Sstevel@tonic-gate { 13457c478bd9Sstevel@tonic-gate char *contents; 13467c478bd9Sstevel@tonic-gate vnode_t *xattr; 13477c478bd9Sstevel@tonic-gate int size; 13487c478bd9Sstevel@tonic-gate vsecattr_t *vsp; 13497c478bd9Sstevel@tonic-gate int freed; 13507c478bd9Sstevel@tonic-gate bool_t rdc = FALSE; 13517c478bd9Sstevel@tonic-gate 13527c478bd9Sstevel@tonic-gate /* 13537c478bd9Sstevel@tonic-gate * Free any held credentials and caches which 13547c478bd9Sstevel@tonic-gate * may be associated with this rnode. 
13557c478bd9Sstevel@tonic-gate */ 13567c478bd9Sstevel@tonic-gate if (!mutex_tryenter(&rp->r_statelock)) 13577c478bd9Sstevel@tonic-gate return (0); 13587c478bd9Sstevel@tonic-gate contents = rp->r_symlink.contents; 13597c478bd9Sstevel@tonic-gate size = rp->r_symlink.size; 13607c478bd9Sstevel@tonic-gate rp->r_symlink.contents = NULL; 13617c478bd9Sstevel@tonic-gate vsp = rp->r_secattr; 13627c478bd9Sstevel@tonic-gate rp->r_secattr = NULL; 13637c478bd9Sstevel@tonic-gate if (rp->r_dir != NULL) 13647c478bd9Sstevel@tonic-gate rdc = TRUE; 13657c478bd9Sstevel@tonic-gate xattr = rp->r_xattr_dir; 13667c478bd9Sstevel@tonic-gate rp->r_xattr_dir = NULL; 13677c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statelock); 13687c478bd9Sstevel@tonic-gate 13697c478bd9Sstevel@tonic-gate /* 13707c478bd9Sstevel@tonic-gate * Free the access cache entries. 13717c478bd9Sstevel@tonic-gate */ 13727c478bd9Sstevel@tonic-gate freed = nfs4_access_purge_rp(rp); 13737c478bd9Sstevel@tonic-gate 13747c478bd9Sstevel@tonic-gate if (contents == NULL && vsp == NULL && rdc == FALSE && xattr == NULL) 13757c478bd9Sstevel@tonic-gate return (freed); 13767c478bd9Sstevel@tonic-gate 13777c478bd9Sstevel@tonic-gate /* 13787c478bd9Sstevel@tonic-gate * Free the symbolic link cache. 13797c478bd9Sstevel@tonic-gate */ 13807c478bd9Sstevel@tonic-gate if (contents != NULL) { 13817c478bd9Sstevel@tonic-gate 13827c478bd9Sstevel@tonic-gate kmem_free((void *)contents, size); 13837c478bd9Sstevel@tonic-gate } 13847c478bd9Sstevel@tonic-gate 13857c478bd9Sstevel@tonic-gate /* 13867c478bd9Sstevel@tonic-gate * Free any cached ACL. 
13877c478bd9Sstevel@tonic-gate */ 13887c478bd9Sstevel@tonic-gate if (vsp != NULL) 13897c478bd9Sstevel@tonic-gate nfs4_acl_free_cache(vsp); 13907c478bd9Sstevel@tonic-gate 13917c478bd9Sstevel@tonic-gate nfs4_purge_rddir_cache(RTOV4(rp)); 13927c478bd9Sstevel@tonic-gate 13937c478bd9Sstevel@tonic-gate /* 13947c478bd9Sstevel@tonic-gate * Release the xattr directory vnode 13957c478bd9Sstevel@tonic-gate */ 13967c478bd9Sstevel@tonic-gate if (xattr != NULL) 13977c478bd9Sstevel@tonic-gate VN_RELE(xattr); 13987c478bd9Sstevel@tonic-gate 13997c478bd9Sstevel@tonic-gate return (1); 14007c478bd9Sstevel@tonic-gate } 14017c478bd9Sstevel@tonic-gate 14027c478bd9Sstevel@tonic-gate static int 14037c478bd9Sstevel@tonic-gate nfs4_free_reclaim(void) 14047c478bd9Sstevel@tonic-gate { 14057c478bd9Sstevel@tonic-gate int freed; 14067c478bd9Sstevel@tonic-gate rnode4_t *rp; 14077c478bd9Sstevel@tonic-gate 14087c478bd9Sstevel@tonic-gate #ifdef DEBUG 14097c478bd9Sstevel@tonic-gate clstat4_debug.f_reclaim.value.ui64++; 14107c478bd9Sstevel@tonic-gate #endif 14117c478bd9Sstevel@tonic-gate freed = 0; 14127c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 14137c478bd9Sstevel@tonic-gate rp = rp4freelist; 14147c478bd9Sstevel@tonic-gate if (rp != NULL) { 14157c478bd9Sstevel@tonic-gate do { 14167c478bd9Sstevel@tonic-gate if (nfs4_free_data_reclaim(rp)) 14177c478bd9Sstevel@tonic-gate freed = 1; 14187c478bd9Sstevel@tonic-gate } while ((rp = rp->r_freef) != rp4freelist); 14197c478bd9Sstevel@tonic-gate } 14207c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 14217c478bd9Sstevel@tonic-gate return (freed); 14227c478bd9Sstevel@tonic-gate } 14237c478bd9Sstevel@tonic-gate 14247c478bd9Sstevel@tonic-gate static int 14257c478bd9Sstevel@tonic-gate nfs4_active_reclaim(void) 14267c478bd9Sstevel@tonic-gate { 14277c478bd9Sstevel@tonic-gate int freed; 14287c478bd9Sstevel@tonic-gate int index; 14297c478bd9Sstevel@tonic-gate rnode4_t *rp; 14307c478bd9Sstevel@tonic-gate 14317c478bd9Sstevel@tonic-gate #ifdef DEBUG 
14327c478bd9Sstevel@tonic-gate clstat4_debug.a_reclaim.value.ui64++; 14337c478bd9Sstevel@tonic-gate #endif 14347c478bd9Sstevel@tonic-gate freed = 0; 14357c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 14367c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 14377c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 14387c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 14397c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 14407c478bd9Sstevel@tonic-gate if (nfs4_active_data_reclaim(rp)) 14417c478bd9Sstevel@tonic-gate freed = 1; 14427c478bd9Sstevel@tonic-gate } 14437c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 14447c478bd9Sstevel@tonic-gate } 14457c478bd9Sstevel@tonic-gate return (freed); 14467c478bd9Sstevel@tonic-gate } 14477c478bd9Sstevel@tonic-gate 14487c478bd9Sstevel@tonic-gate static int 14497c478bd9Sstevel@tonic-gate nfs4_rnode_reclaim(void) 14507c478bd9Sstevel@tonic-gate { 14517c478bd9Sstevel@tonic-gate int freed; 14527c478bd9Sstevel@tonic-gate rnode4_t *rp; 14537c478bd9Sstevel@tonic-gate vnode_t *vp; 14547c478bd9Sstevel@tonic-gate 14557c478bd9Sstevel@tonic-gate #ifdef DEBUG 14567c478bd9Sstevel@tonic-gate clstat4_debug.r_reclaim.value.ui64++; 14577c478bd9Sstevel@tonic-gate #endif 14587c478bd9Sstevel@tonic-gate freed = 0; 14597c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 14607c478bd9Sstevel@tonic-gate while ((rp = rp4freelist) != NULL) { 14617c478bd9Sstevel@tonic-gate rp4_rmfree(rp); 14627c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 14637c478bd9Sstevel@tonic-gate if (rp->r_flags & R4HASHED) { 14647c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 14657c478bd9Sstevel@tonic-gate rw_enter(&rp->r_hashq->r_lock, RW_WRITER); 14667c478bd9Sstevel@tonic-gate mutex_enter(&vp->v_lock); 14677c478bd9Sstevel@tonic-gate if (vp->v_count > 1) { 14687c478bd9Sstevel@tonic-gate vp->v_count--; 14697c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 14707c478bd9Sstevel@tonic-gate 
rw_exit(&rp->r_hashq->r_lock); 14717c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 14727c478bd9Sstevel@tonic-gate continue; 14737c478bd9Sstevel@tonic-gate } 14747c478bd9Sstevel@tonic-gate mutex_exit(&vp->v_lock); 14757c478bd9Sstevel@tonic-gate rp4_rmhash_locked(rp); 14767c478bd9Sstevel@tonic-gate rw_exit(&rp->r_hashq->r_lock); 14777c478bd9Sstevel@tonic-gate } 14787c478bd9Sstevel@tonic-gate /* 14797c478bd9Sstevel@tonic-gate * This call to rp_addfree will end up destroying the 14807c478bd9Sstevel@tonic-gate * rnode, but in a safe way with the appropriate set 14817c478bd9Sstevel@tonic-gate * of checks done. 14827c478bd9Sstevel@tonic-gate */ 14837c478bd9Sstevel@tonic-gate rp4_addfree(rp, CRED()); 14847c478bd9Sstevel@tonic-gate mutex_enter(&rp4freelist_lock); 14857c478bd9Sstevel@tonic-gate } 14867c478bd9Sstevel@tonic-gate mutex_exit(&rp4freelist_lock); 14877c478bd9Sstevel@tonic-gate return (freed); 14887c478bd9Sstevel@tonic-gate } 14897c478bd9Sstevel@tonic-gate 14907c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 14917c478bd9Sstevel@tonic-gate static void 14927c478bd9Sstevel@tonic-gate nfs4_reclaim(void *cdrarg) 14937c478bd9Sstevel@tonic-gate { 14947c478bd9Sstevel@tonic-gate 14957c478bd9Sstevel@tonic-gate #ifdef DEBUG 14967c478bd9Sstevel@tonic-gate clstat4_debug.reclaim.value.ui64++; 14977c478bd9Sstevel@tonic-gate #endif 14987c478bd9Sstevel@tonic-gate if (nfs4_free_reclaim()) 14997c478bd9Sstevel@tonic-gate return; 15007c478bd9Sstevel@tonic-gate 15017c478bd9Sstevel@tonic-gate if (nfs4_active_reclaim()) 15027c478bd9Sstevel@tonic-gate return; 15037c478bd9Sstevel@tonic-gate 15047c478bd9Sstevel@tonic-gate (void) nfs4_rnode_reclaim(); 15057c478bd9Sstevel@tonic-gate } 15067c478bd9Sstevel@tonic-gate 15077c478bd9Sstevel@tonic-gate /* 15087c478bd9Sstevel@tonic-gate * Returns the clientid4 to use for the given mntinfo4. Note that the 15097c478bd9Sstevel@tonic-gate * clientid can change if the caller drops mi_recovlock. 
15107c478bd9Sstevel@tonic-gate */ 15117c478bd9Sstevel@tonic-gate 15127c478bd9Sstevel@tonic-gate clientid4 15137c478bd9Sstevel@tonic-gate mi2clientid(mntinfo4_t *mi) 15147c478bd9Sstevel@tonic-gate { 15157c478bd9Sstevel@tonic-gate nfs4_server_t *sp; 15167c478bd9Sstevel@tonic-gate clientid4 clientid = 0; 15177c478bd9Sstevel@tonic-gate 15187c478bd9Sstevel@tonic-gate /* this locks down sp if it is found */ 15197c478bd9Sstevel@tonic-gate sp = find_nfs4_server(mi); 15207c478bd9Sstevel@tonic-gate if (sp != NULL) { 15217c478bd9Sstevel@tonic-gate clientid = sp->clientid; 15227c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 15237c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 15247c478bd9Sstevel@tonic-gate } 15257c478bd9Sstevel@tonic-gate return (clientid); 15267c478bd9Sstevel@tonic-gate } 15277c478bd9Sstevel@tonic-gate 15287c478bd9Sstevel@tonic-gate /* 15297c478bd9Sstevel@tonic-gate * Return the current lease time for the server associated with the given 15307c478bd9Sstevel@tonic-gate * file. Note that the lease time could change immediately after this 15317c478bd9Sstevel@tonic-gate * call. 
15327c478bd9Sstevel@tonic-gate */ 15337c478bd9Sstevel@tonic-gate 15347c478bd9Sstevel@tonic-gate time_t 15357c478bd9Sstevel@tonic-gate r2lease_time(rnode4_t *rp) 15367c478bd9Sstevel@tonic-gate { 15377c478bd9Sstevel@tonic-gate nfs4_server_t *sp; 15387c478bd9Sstevel@tonic-gate time_t lease_time; 15397c478bd9Sstevel@tonic-gate mntinfo4_t *mi = VTOMI4(RTOV4(rp)); 15407c478bd9Sstevel@tonic-gate 15417c478bd9Sstevel@tonic-gate (void) nfs_rw_enter_sig(&mi->mi_recovlock, RW_READER, 0); 15427c478bd9Sstevel@tonic-gate 15437c478bd9Sstevel@tonic-gate /* this locks down sp if it is found */ 15447c478bd9Sstevel@tonic-gate sp = find_nfs4_server(VTOMI4(RTOV4(rp))); 15457c478bd9Sstevel@tonic-gate 15467c478bd9Sstevel@tonic-gate if (VTOMI4(RTOV4(rp))->mi_vfsp->vfs_flag & VFS_UNMOUNTED) { 15477c478bd9Sstevel@tonic-gate if (sp != NULL) { 15487c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 15497c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 15507c478bd9Sstevel@tonic-gate } 15517c478bd9Sstevel@tonic-gate nfs_rw_exit(&mi->mi_recovlock); 15527c478bd9Sstevel@tonic-gate return (1); /* 1 second */ 15537c478bd9Sstevel@tonic-gate } 15547c478bd9Sstevel@tonic-gate 15557c478bd9Sstevel@tonic-gate ASSERT(sp != NULL); 15567c478bd9Sstevel@tonic-gate 15577c478bd9Sstevel@tonic-gate lease_time = sp->s_lease_time; 15587c478bd9Sstevel@tonic-gate 15597c478bd9Sstevel@tonic-gate mutex_exit(&sp->s_lock); 15607c478bd9Sstevel@tonic-gate nfs4_server_rele(sp); 15617c478bd9Sstevel@tonic-gate nfs_rw_exit(&mi->mi_recovlock); 15627c478bd9Sstevel@tonic-gate 15637c478bd9Sstevel@tonic-gate return (lease_time); 15647c478bd9Sstevel@tonic-gate } 15657c478bd9Sstevel@tonic-gate 15667c478bd9Sstevel@tonic-gate /* 15677c478bd9Sstevel@tonic-gate * Return a list with information about all the known open instances for 15687c478bd9Sstevel@tonic-gate * a filesystem. The caller must call r4releopenlist() when done with the 15697c478bd9Sstevel@tonic-gate * list. 
15707c478bd9Sstevel@tonic-gate * 15717c478bd9Sstevel@tonic-gate * We are safe at looking at os_valid and os_pending_close across dropping 15727c478bd9Sstevel@tonic-gate * the 'os_sync_lock' to count up the number of open streams and then 15737c478bd9Sstevel@tonic-gate * allocate memory for the osp list due to: 15747c478bd9Sstevel@tonic-gate * -Looking at os_pending_close is safe since this routine is 15757c478bd9Sstevel@tonic-gate * only called via recovery, and os_pending_close can only be set via 15767c478bd9Sstevel@tonic-gate * a non-recovery operation (which are all blocked when recovery 15777c478bd9Sstevel@tonic-gate * is active). 15787c478bd9Sstevel@tonic-gate * 15797c478bd9Sstevel@tonic-gate * -Examining os_valid is safe since non-recovery operations, which 15807c478bd9Sstevel@tonic-gate * could potentially switch os_valid to 0, are blocked (via 15817c478bd9Sstevel@tonic-gate * nfs4_start_fop) and recovery is single-threaded per mntinfo4_t 15827c478bd9Sstevel@tonic-gate * (which means we are the only recovery thread potentially acting 15837c478bd9Sstevel@tonic-gate * on this open stream). 
15847c478bd9Sstevel@tonic-gate */ 15857c478bd9Sstevel@tonic-gate 15867c478bd9Sstevel@tonic-gate nfs4_opinst_t * 15877c478bd9Sstevel@tonic-gate r4mkopenlist(mntinfo4_t *mi) 15887c478bd9Sstevel@tonic-gate { 15897c478bd9Sstevel@tonic-gate nfs4_opinst_t *reopenlist, *rep; 15907c478bd9Sstevel@tonic-gate rnode4_t *rp; 15917c478bd9Sstevel@tonic-gate vnode_t *vp; 15927c478bd9Sstevel@tonic-gate vfs_t *vfsp = mi->mi_vfsp; 15937c478bd9Sstevel@tonic-gate int numosp; 15947c478bd9Sstevel@tonic-gate nfs4_open_stream_t *osp; 15957c478bd9Sstevel@tonic-gate int index; 15967c478bd9Sstevel@tonic-gate open_delegation_type4 dtype; 15977c478bd9Sstevel@tonic-gate int hold_vnode; 15987c478bd9Sstevel@tonic-gate 15997c478bd9Sstevel@tonic-gate reopenlist = NULL; 16007c478bd9Sstevel@tonic-gate 16017c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 16027c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 16037c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 16047c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 16057c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 16067c478bd9Sstevel@tonic-gate 16077c478bd9Sstevel@tonic-gate vp = RTOV4(rp); 16087c478bd9Sstevel@tonic-gate if (vp->v_vfsp != vfsp) 16097c478bd9Sstevel@tonic-gate continue; 16107c478bd9Sstevel@tonic-gate hold_vnode = 0; 16117c478bd9Sstevel@tonic-gate 16127c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_os_lock); 16137c478bd9Sstevel@tonic-gate 16147c478bd9Sstevel@tonic-gate /* Count the number of valid open_streams of the file */ 16157c478bd9Sstevel@tonic-gate numosp = 0; 16167c478bd9Sstevel@tonic-gate for (osp = list_head(&rp->r_open_streams); osp != NULL; 16177c478bd9Sstevel@tonic-gate osp = list_next(&rp->r_open_streams, osp)) { 16187c478bd9Sstevel@tonic-gate mutex_enter(&osp->os_sync_lock); 16197c478bd9Sstevel@tonic-gate if (osp->os_valid && !osp->os_pending_close) 16207c478bd9Sstevel@tonic-gate numosp++; 16217c478bd9Sstevel@tonic-gate mutex_exit(&osp->os_sync_lock); 
16227c478bd9Sstevel@tonic-gate } 16237c478bd9Sstevel@tonic-gate 16247c478bd9Sstevel@tonic-gate /* Fill in the valid open streams per vp */ 16257c478bd9Sstevel@tonic-gate if (numosp > 0) { 16267c478bd9Sstevel@tonic-gate int j; 16277c478bd9Sstevel@tonic-gate 16287c478bd9Sstevel@tonic-gate hold_vnode = 1; 16297c478bd9Sstevel@tonic-gate 16307c478bd9Sstevel@tonic-gate /* 16317c478bd9Sstevel@tonic-gate * Add a new open instance to the list 16327c478bd9Sstevel@tonic-gate */ 16337c478bd9Sstevel@tonic-gate rep = kmem_zalloc(sizeof (*reopenlist), 16347c478bd9Sstevel@tonic-gate KM_SLEEP); 16357c478bd9Sstevel@tonic-gate rep->re_next = reopenlist; 16367c478bd9Sstevel@tonic-gate reopenlist = rep; 16377c478bd9Sstevel@tonic-gate 16387c478bd9Sstevel@tonic-gate rep->re_vp = vp; 16397c478bd9Sstevel@tonic-gate rep->re_osp = kmem_zalloc( 16407c478bd9Sstevel@tonic-gate numosp * sizeof (*(rep->re_osp)), 16417c478bd9Sstevel@tonic-gate KM_SLEEP); 16427c478bd9Sstevel@tonic-gate rep->re_numosp = numosp; 16437c478bd9Sstevel@tonic-gate 16447c478bd9Sstevel@tonic-gate j = 0; 16457c478bd9Sstevel@tonic-gate for (osp = list_head(&rp->r_open_streams); 16467c478bd9Sstevel@tonic-gate osp != NULL; 16477c478bd9Sstevel@tonic-gate osp = list_next(&rp->r_open_streams, osp)) { 16487c478bd9Sstevel@tonic-gate 16497c478bd9Sstevel@tonic-gate mutex_enter(&osp->os_sync_lock); 16507c478bd9Sstevel@tonic-gate if (osp->os_valid && 16517c478bd9Sstevel@tonic-gate !osp->os_pending_close) { 16527c478bd9Sstevel@tonic-gate osp->os_ref_count++; 16537c478bd9Sstevel@tonic-gate rep->re_osp[j] = osp; 16547c478bd9Sstevel@tonic-gate j++; 16557c478bd9Sstevel@tonic-gate } 16567c478bd9Sstevel@tonic-gate mutex_exit(&osp->os_sync_lock); 16577c478bd9Sstevel@tonic-gate } 16587c478bd9Sstevel@tonic-gate /* 16597c478bd9Sstevel@tonic-gate * Assuming valid osp(s) stays valid between 16607c478bd9Sstevel@tonic-gate * the time obtaining j and numosp. 
16617c478bd9Sstevel@tonic-gate */ 16627c478bd9Sstevel@tonic-gate ASSERT(j == numosp); 16637c478bd9Sstevel@tonic-gate } 16647c478bd9Sstevel@tonic-gate 16657c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_os_lock); 16667c478bd9Sstevel@tonic-gate /* do this here to keep v_lock > r_os_lock */ 16677c478bd9Sstevel@tonic-gate if (hold_vnode) 16687c478bd9Sstevel@tonic-gate VN_HOLD(vp); 16697c478bd9Sstevel@tonic-gate mutex_enter(&rp->r_statev4_lock); 16707c478bd9Sstevel@tonic-gate if (rp->r_deleg_type != OPEN_DELEGATE_NONE) { 16717c478bd9Sstevel@tonic-gate /* 16727c478bd9Sstevel@tonic-gate * If this rnode holds a delegation, 16737c478bd9Sstevel@tonic-gate * but if there are no valid open streams, 16747c478bd9Sstevel@tonic-gate * then just discard the delegation 16757c478bd9Sstevel@tonic-gate * without doing delegreturn. 16767c478bd9Sstevel@tonic-gate */ 16777c478bd9Sstevel@tonic-gate if (numosp > 0) 16787c478bd9Sstevel@tonic-gate rp->r_deleg_needs_recovery = 16797c478bd9Sstevel@tonic-gate rp->r_deleg_type; 16807c478bd9Sstevel@tonic-gate } 16817c478bd9Sstevel@tonic-gate /* Save the delegation type for use outside the lock */ 16827c478bd9Sstevel@tonic-gate dtype = rp->r_deleg_type; 16837c478bd9Sstevel@tonic-gate mutex_exit(&rp->r_statev4_lock); 16847c478bd9Sstevel@tonic-gate 16857c478bd9Sstevel@tonic-gate /* 16867c478bd9Sstevel@tonic-gate * If we have a delegation then get rid of it. 16877c478bd9Sstevel@tonic-gate * We've set rp->r_deleg_needs_recovery so we have 16887c478bd9Sstevel@tonic-gate * enough information to recover. 
16897c478bd9Sstevel@tonic-gate */ 16907c478bd9Sstevel@tonic-gate if (dtype != OPEN_DELEGATE_NONE) { 16917c478bd9Sstevel@tonic-gate (void) nfs4delegreturn(rp, NFS4_DR_DISCARD); 16927c478bd9Sstevel@tonic-gate } 16937c478bd9Sstevel@tonic-gate } 16947c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 16957c478bd9Sstevel@tonic-gate } 16967c478bd9Sstevel@tonic-gate return (reopenlist); 16977c478bd9Sstevel@tonic-gate } 16987c478bd9Sstevel@tonic-gate 16997c478bd9Sstevel@tonic-gate /* 17007c478bd9Sstevel@tonic-gate * Release the list of open instance references. 17017c478bd9Sstevel@tonic-gate */ 17027c478bd9Sstevel@tonic-gate 17037c478bd9Sstevel@tonic-gate void 17047c478bd9Sstevel@tonic-gate r4releopenlist(nfs4_opinst_t *reopenp) 17057c478bd9Sstevel@tonic-gate { 17067c478bd9Sstevel@tonic-gate nfs4_opinst_t *rep, *next; 17077c478bd9Sstevel@tonic-gate int i; 17087c478bd9Sstevel@tonic-gate 17097c478bd9Sstevel@tonic-gate for (rep = reopenp; rep; rep = next) { 17107c478bd9Sstevel@tonic-gate next = rep->re_next; 17117c478bd9Sstevel@tonic-gate 17127c478bd9Sstevel@tonic-gate for (i = 0; i < rep->re_numosp; i++) 17137c478bd9Sstevel@tonic-gate open_stream_rele(rep->re_osp[i], VTOR4(rep->re_vp)); 17147c478bd9Sstevel@tonic-gate 17157c478bd9Sstevel@tonic-gate VN_RELE(rep->re_vp); 17167c478bd9Sstevel@tonic-gate kmem_free(rep->re_osp, 17177c478bd9Sstevel@tonic-gate rep->re_numosp * sizeof (*(rep->re_osp))); 17187c478bd9Sstevel@tonic-gate 17197c478bd9Sstevel@tonic-gate kmem_free(rep, sizeof (*rep)); 17207c478bd9Sstevel@tonic-gate } 17217c478bd9Sstevel@tonic-gate } 17227c478bd9Sstevel@tonic-gate 17237c478bd9Sstevel@tonic-gate int 17247c478bd9Sstevel@tonic-gate nfs4_rnode_init(void) 17257c478bd9Sstevel@tonic-gate { 17267c478bd9Sstevel@tonic-gate ulong_t nrnode4_max; 17277c478bd9Sstevel@tonic-gate int i; 17287c478bd9Sstevel@tonic-gate 17297c478bd9Sstevel@tonic-gate /* 17307c478bd9Sstevel@tonic-gate * Compute the size of the rnode4 hash table 17317c478bd9Sstevel@tonic-gate */ 
17327c478bd9Sstevel@tonic-gate if (nrnode <= 0) 17337c478bd9Sstevel@tonic-gate nrnode = ncsize; 17347c478bd9Sstevel@tonic-gate nrnode4_max = 17357c478bd9Sstevel@tonic-gate (ulong_t)((kmem_maxavail() >> 2) / sizeof (struct rnode4)); 17367c478bd9Sstevel@tonic-gate if (nrnode > nrnode4_max || (nrnode == 0 && ncsize == 0)) { 17377c478bd9Sstevel@tonic-gate zcmn_err(GLOBAL_ZONEID, CE_NOTE, 17387c478bd9Sstevel@tonic-gate "setting nrnode to max value of %ld", nrnode4_max); 17397c478bd9Sstevel@tonic-gate nrnode = nrnode4_max; 17407c478bd9Sstevel@tonic-gate } 17417c478bd9Sstevel@tonic-gate rtable4size = 1 << highbit(nrnode / rnode4_hashlen); 17427c478bd9Sstevel@tonic-gate rtable4mask = rtable4size - 1; 17437c478bd9Sstevel@tonic-gate 17447c478bd9Sstevel@tonic-gate /* 17457c478bd9Sstevel@tonic-gate * Allocate and initialize the hash buckets 17467c478bd9Sstevel@tonic-gate */ 17477c478bd9Sstevel@tonic-gate rtable4 = kmem_alloc(rtable4size * sizeof (*rtable4), KM_SLEEP); 17487c478bd9Sstevel@tonic-gate for (i = 0; i < rtable4size; i++) { 17497c478bd9Sstevel@tonic-gate rtable4[i].r_hashf = (rnode4_t *)(&rtable4[i]); 17507c478bd9Sstevel@tonic-gate rtable4[i].r_hashb = (rnode4_t *)(&rtable4[i]); 17517c478bd9Sstevel@tonic-gate rw_init(&rtable4[i].r_lock, NULL, RW_DEFAULT, NULL); 17527c478bd9Sstevel@tonic-gate } 17537c478bd9Sstevel@tonic-gate 17547c478bd9Sstevel@tonic-gate rnode4_cache = kmem_cache_create("rnode4_cache", sizeof (rnode4_t), 17557c478bd9Sstevel@tonic-gate 0, NULL, NULL, nfs4_reclaim, NULL, NULL, 0); 17567c478bd9Sstevel@tonic-gate 17577c478bd9Sstevel@tonic-gate return (0); 17587c478bd9Sstevel@tonic-gate } 17597c478bd9Sstevel@tonic-gate 17607c478bd9Sstevel@tonic-gate int 17617c478bd9Sstevel@tonic-gate nfs4_rnode_fini(void) 17627c478bd9Sstevel@tonic-gate { 17637c478bd9Sstevel@tonic-gate int i; 17647c478bd9Sstevel@tonic-gate 17657c478bd9Sstevel@tonic-gate /* 17667c478bd9Sstevel@tonic-gate * Deallocate the rnode hash queues 17677c478bd9Sstevel@tonic-gate */ 
17687c478bd9Sstevel@tonic-gate kmem_cache_destroy(rnode4_cache); 17697c478bd9Sstevel@tonic-gate 17707c478bd9Sstevel@tonic-gate for (i = 0; i < rtable4size; i++) 17717c478bd9Sstevel@tonic-gate rw_destroy(&rtable4[i].r_lock); 17727c478bd9Sstevel@tonic-gate 17737c478bd9Sstevel@tonic-gate kmem_free(rtable4, rtable4size * sizeof (*rtable4)); 17747c478bd9Sstevel@tonic-gate 17757c478bd9Sstevel@tonic-gate return (0); 17767c478bd9Sstevel@tonic-gate } 17777c478bd9Sstevel@tonic-gate 17787c478bd9Sstevel@tonic-gate /* 17797c478bd9Sstevel@tonic-gate * Return non-zero if the given filehandle refers to the root filehandle 17807c478bd9Sstevel@tonic-gate * for the given rnode. 17817c478bd9Sstevel@tonic-gate */ 17827c478bd9Sstevel@tonic-gate 17837c478bd9Sstevel@tonic-gate static int 17847c478bd9Sstevel@tonic-gate isrootfh(nfs4_sharedfh_t *fh, rnode4_t *rp) 17857c478bd9Sstevel@tonic-gate { 17867c478bd9Sstevel@tonic-gate int isroot; 17877c478bd9Sstevel@tonic-gate 17887c478bd9Sstevel@tonic-gate isroot = 0; 17897c478bd9Sstevel@tonic-gate if (SFH4_SAME(VTOMI4(RTOV4(rp))->mi_rootfh, fh)) 17907c478bd9Sstevel@tonic-gate isroot = 1; 17917c478bd9Sstevel@tonic-gate 17927c478bd9Sstevel@tonic-gate return (isroot); 17937c478bd9Sstevel@tonic-gate } 17947c478bd9Sstevel@tonic-gate 17957c478bd9Sstevel@tonic-gate #ifdef DEBUG 17967c478bd9Sstevel@tonic-gate 17977c478bd9Sstevel@tonic-gate /* 17987c478bd9Sstevel@tonic-gate * Look in the rnode table for other rnodes that have the same filehandle. 
17997c478bd9Sstevel@tonic-gate * Assume the lock is held for the hash chain of checkrp 18007c478bd9Sstevel@tonic-gate */ 18017c478bd9Sstevel@tonic-gate 18027c478bd9Sstevel@tonic-gate static void 18037c478bd9Sstevel@tonic-gate r4_dup_check(rnode4_t *checkrp, vfs_t *vfsp) 18047c478bd9Sstevel@tonic-gate { 18057c478bd9Sstevel@tonic-gate rnode4_t *rp; 18067c478bd9Sstevel@tonic-gate vnode_t *tvp; 18077c478bd9Sstevel@tonic-gate nfs4_fhandle_t fh, fh2; 18087c478bd9Sstevel@tonic-gate int index; 18097c478bd9Sstevel@tonic-gate 18107c478bd9Sstevel@tonic-gate if (!r4_check_for_dups) 18117c478bd9Sstevel@tonic-gate return; 18127c478bd9Sstevel@tonic-gate 18137c478bd9Sstevel@tonic-gate ASSERT(RW_LOCK_HELD(&checkrp->r_hashq->r_lock)); 18147c478bd9Sstevel@tonic-gate 18157c478bd9Sstevel@tonic-gate sfh4_copyval(checkrp->r_fh, &fh); 18167c478bd9Sstevel@tonic-gate 18177c478bd9Sstevel@tonic-gate for (index = 0; index < rtable4size; index++) { 18187c478bd9Sstevel@tonic-gate 18197c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 18207c478bd9Sstevel@tonic-gate rw_enter(&rtable4[index].r_lock, RW_READER); 18217c478bd9Sstevel@tonic-gate 18227c478bd9Sstevel@tonic-gate for (rp = rtable4[index].r_hashf; 18237c478bd9Sstevel@tonic-gate rp != (rnode4_t *)(&rtable4[index]); 18247c478bd9Sstevel@tonic-gate rp = rp->r_hashf) { 18257c478bd9Sstevel@tonic-gate 18267c478bd9Sstevel@tonic-gate if (rp == checkrp) 18277c478bd9Sstevel@tonic-gate continue; 18287c478bd9Sstevel@tonic-gate 18297c478bd9Sstevel@tonic-gate tvp = RTOV4(rp); 18307c478bd9Sstevel@tonic-gate if (tvp->v_vfsp != vfsp) 18317c478bd9Sstevel@tonic-gate continue; 18327c478bd9Sstevel@tonic-gate 18337c478bd9Sstevel@tonic-gate sfh4_copyval(rp->r_fh, &fh2); 18347c478bd9Sstevel@tonic-gate if (nfs4cmpfhandle(&fh, &fh2) == 0) { 18357c478bd9Sstevel@tonic-gate cmn_err(CE_PANIC, "rnodes with same fs, fh " 18367c478bd9Sstevel@tonic-gate "(%p, %p)", (void *)checkrp, (void *)rp); 18377c478bd9Sstevel@tonic-gate } 18387c478bd9Sstevel@tonic-gate 
} 18397c478bd9Sstevel@tonic-gate 18407c478bd9Sstevel@tonic-gate if (&rtable4[index] != checkrp->r_hashq) 18417c478bd9Sstevel@tonic-gate rw_exit(&rtable4[index].r_lock); 18427c478bd9Sstevel@tonic-gate } 18437c478bd9Sstevel@tonic-gate } 18447c478bd9Sstevel@tonic-gate 18457c478bd9Sstevel@tonic-gate #endif /* DEBUG */ 1846