xref: /illumos-gate/usr/src/uts/common/fs/ufs/ufs_lockfs.c (revision 1a5e258f)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
566c9f83dSowenr  * Common Development and Distribution License (the "License").
666c9f83dSowenr  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
2244c4f64bSJohn Levon  * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
237c478bd9Sstevel@tonic-gate  */
247c478bd9Sstevel@tonic-gate 
257c478bd9Sstevel@tonic-gate #include <sys/types.h>
267c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
277c478bd9Sstevel@tonic-gate #include <sys/param.h>
287c478bd9Sstevel@tonic-gate #include <sys/time.h>
297c478bd9Sstevel@tonic-gate #include <sys/systm.h>
307c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
317c478bd9Sstevel@tonic-gate #include <sys/resource.h>
327c478bd9Sstevel@tonic-gate #include <sys/signal.h>
337c478bd9Sstevel@tonic-gate #include <sys/cred.h>
347c478bd9Sstevel@tonic-gate #include <sys/user.h>
357c478bd9Sstevel@tonic-gate #include <sys/buf.h>
367c478bd9Sstevel@tonic-gate #include <sys/vfs.h>
377c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
387c478bd9Sstevel@tonic-gate #include <sys/proc.h>
397c478bd9Sstevel@tonic-gate #include <sys/disp.h>
407c478bd9Sstevel@tonic-gate #include <sys/file.h>
417c478bd9Sstevel@tonic-gate #include <sys/fcntl.h>
427c478bd9Sstevel@tonic-gate #include <sys/flock.h>
43bc69f433Saguzovsk #include <sys/atomic.h>
447c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
457c478bd9Sstevel@tonic-gate #include <sys/uio.h>
467c478bd9Sstevel@tonic-gate #include <sys/conf.h>
477c478bd9Sstevel@tonic-gate #include <sys/mman.h>
487c478bd9Sstevel@tonic-gate #include <sys/pathname.h>
497c478bd9Sstevel@tonic-gate #include <sys/debug.h>
507c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
517c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
527c478bd9Sstevel@tonic-gate #include <sys/acct.h>
537c478bd9Sstevel@tonic-gate #include <sys/dnlc.h>
547c478bd9Sstevel@tonic-gate #include <sys/swap.h>
557c478bd9Sstevel@tonic-gate 
567c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_fs.h>
577c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_inode.h>
587c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_fsdir.h>
597c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_trans.h>
607c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_panic.h>
617c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_mount.h>
627c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_bio.h>
637c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_log.h>
647c478bd9Sstevel@tonic-gate #include <sys/fs/ufs_quota.h>
657c478bd9Sstevel@tonic-gate #include <sys/dirent.h>		/* must be AFTER <sys/fs/fsdir.h>! */
667c478bd9Sstevel@tonic-gate #include <sys/errno.h>
677c478bd9Sstevel@tonic-gate #include <sys/sysinfo.h>
687c478bd9Sstevel@tonic-gate 
697c478bd9Sstevel@tonic-gate #include <vm/hat.h>
707c478bd9Sstevel@tonic-gate #include <vm/pvn.h>
717c478bd9Sstevel@tonic-gate #include <vm/as.h>
727c478bd9Sstevel@tonic-gate #include <vm/seg.h>
737c478bd9Sstevel@tonic-gate #include <vm/seg_map.h>
747c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h>
757c478bd9Sstevel@tonic-gate #include <vm/rm.h>
767c478bd9Sstevel@tonic-gate #include <vm/anon.h>
777c478bd9Sstevel@tonic-gate #include <sys/swap.h>
787c478bd9Sstevel@tonic-gate #include <sys/dnlc.h>
797c478bd9Sstevel@tonic-gate 
807c478bd9Sstevel@tonic-gate extern struct vnode *common_specvp(struct vnode *vp);
817c478bd9Sstevel@tonic-gate 
827c478bd9Sstevel@tonic-gate /* error lock status */
837c478bd9Sstevel@tonic-gate #define	UN_ERRLCK	(-1)
847c478bd9Sstevel@tonic-gate #define	SET_ERRLCK	1
857c478bd9Sstevel@tonic-gate #define	RE_ERRLCK	2
867c478bd9Sstevel@tonic-gate #define	NO_ERRLCK	0
877c478bd9Sstevel@tonic-gate 
/*
 * Index to be used in TSD for storing lockfs data
 */
uint_t ufs_lockfs_key;

/*
 * Per-thread record, kept in TSD as a singly linked list headed at
 * ufs_lockfs_key, noting that this thread has a vnode operation in
 * progress on the file system identified by ulp.  Used by the macros
 * below to detect recursive VOPs on the same file system.
 */
typedef struct _ulockfs_info {
	struct _ulockfs_info *next;	/* next record in this thread's list */
	struct ulockfs *ulp;		/* fs being operated on; NULL = free slot */
	uint_t flags;			/* ULOCK_INFO_* flags below */
} ulockfs_info_t;

#define	ULOCK_INFO_FALLOCATE	0x00000001	/* fallocate thread */
100303bf60bSsdebnath 
/*
 * Check in TSD whether this thread is already doing a VOP on this file
 * system: sets 'found' to 1 if an entry for 'ulp' exists in the list
 * headed by 'head', else 0.  As a side effect, 'free' is left pointing
 * at the first reusable (ulp == NULL) entry seen before any match, or
 * NULL if none was encountered.
 */
#define	IS_REC_VOP(found, head, ulp, free)		\
{							\
	ulockfs_info_t *_curr;				\
							\
	for (found = 0, free = NULL, _curr = head;	\
	    _curr != NULL; _curr = _curr->next) {	\
		if ((free == NULL) &&			\
		    (_curr->ulp == NULL))		\
			free = _curr;			\
		if (_curr->ulp == ulp) {		\
			found = 1;			\
			break;				\
		}					\
	}						\
}
1197c478bd9Sstevel@tonic-gate 
/*
 * Get the lockfs data from TSD so that lockfs handles the recursive VOP
 * properly: walk this thread's list starting at 'head' and set 'info'
 * to the entry whose ulp matches, or to NULL when the thread has no
 * VOP in progress on this file system.
 */
#define	SEARCH_ULOCKFSP(head, ulp, info)		\
{							\
	ulockfs_info_t *_curr;				\
							\
	for (_curr = head; _curr != NULL;		\
	    _curr = _curr->next) {			\
		if (_curr->ulp == ulp) {		\
			break;				\
		}					\
	}						\
							\
	info = _curr;					\
}
1377c478bd9Sstevel@tonic-gate 
1387c478bd9Sstevel@tonic-gate /*
1397c478bd9Sstevel@tonic-gate  * Validate lockfs request
1407c478bd9Sstevel@tonic-gate  */
1417c478bd9Sstevel@tonic-gate static int
ufs_getlfd(struct lockfs * lockfsp,struct lockfs * ul_lockfsp)1427c478bd9Sstevel@tonic-gate ufs_getlfd(
1437c478bd9Sstevel@tonic-gate 	struct lockfs *lockfsp,		/* new lock request */
1447c478bd9Sstevel@tonic-gate 	struct lockfs *ul_lockfsp)	/* old lock state */
1457c478bd9Sstevel@tonic-gate {
1467c478bd9Sstevel@tonic-gate 	int	error = 0;
1477c478bd9Sstevel@tonic-gate 
1487c478bd9Sstevel@tonic-gate 	/*
1497c478bd9Sstevel@tonic-gate 	 * no input flags defined
1507c478bd9Sstevel@tonic-gate 	 */
1517c478bd9Sstevel@tonic-gate 	if (lockfsp->lf_flags != 0) {
1527c478bd9Sstevel@tonic-gate 		error = EINVAL;
1537c478bd9Sstevel@tonic-gate 		goto errout;
1547c478bd9Sstevel@tonic-gate 	}
1557c478bd9Sstevel@tonic-gate 
1567c478bd9Sstevel@tonic-gate 	/*
1577c478bd9Sstevel@tonic-gate 	 * check key
1587c478bd9Sstevel@tonic-gate 	 */
1597c478bd9Sstevel@tonic-gate 	if (!LOCKFS_IS_ULOCK(ul_lockfsp))
1607c478bd9Sstevel@tonic-gate 		if (lockfsp->lf_key != ul_lockfsp->lf_key) {
1617c478bd9Sstevel@tonic-gate 			error = EINVAL;
1627c478bd9Sstevel@tonic-gate 			goto errout;
1637c478bd9Sstevel@tonic-gate 	}
1647c478bd9Sstevel@tonic-gate 
1657c478bd9Sstevel@tonic-gate 	lockfsp->lf_key = ul_lockfsp->lf_key + 1;
1667c478bd9Sstevel@tonic-gate 
1677c478bd9Sstevel@tonic-gate errout:
1687c478bd9Sstevel@tonic-gate 	return (error);
1697c478bd9Sstevel@tonic-gate }
1707c478bd9Sstevel@tonic-gate 
1717c478bd9Sstevel@tonic-gate /*
1727c478bd9Sstevel@tonic-gate  * ufs_checkaccton
1737c478bd9Sstevel@tonic-gate  *	check if accounting is turned on on this fs
1747c478bd9Sstevel@tonic-gate  */
1757c478bd9Sstevel@tonic-gate 
1767c478bd9Sstevel@tonic-gate int
ufs_checkaccton(struct vnode * vp)1777c478bd9Sstevel@tonic-gate ufs_checkaccton(struct vnode *vp)
1787c478bd9Sstevel@tonic-gate {
1797c478bd9Sstevel@tonic-gate 	if (acct_fs_in_use(vp))
1807c478bd9Sstevel@tonic-gate 		return (EDEADLK);
1817c478bd9Sstevel@tonic-gate 	return (0);
1827c478bd9Sstevel@tonic-gate }
1837c478bd9Sstevel@tonic-gate 
1847c478bd9Sstevel@tonic-gate /*
1857c478bd9Sstevel@tonic-gate  * ufs_checkswapon
1867c478bd9Sstevel@tonic-gate  *	check if local swapping is to file on this fs
1877c478bd9Sstevel@tonic-gate  */
1887c478bd9Sstevel@tonic-gate int
ufs_checkswapon(struct vnode * vp)1897c478bd9Sstevel@tonic-gate ufs_checkswapon(struct vnode *vp)
1907c478bd9Sstevel@tonic-gate {
1917c478bd9Sstevel@tonic-gate 	struct swapinfo	*sip;
1927c478bd9Sstevel@tonic-gate 
1937c478bd9Sstevel@tonic-gate 	mutex_enter(&swapinfo_lock);
1947c478bd9Sstevel@tonic-gate 	for (sip = swapinfo; sip; sip = sip->si_next)
1957c478bd9Sstevel@tonic-gate 		if (sip->si_vp->v_vfsp == vp->v_vfsp) {
1967c478bd9Sstevel@tonic-gate 			mutex_exit(&swapinfo_lock);
1977c478bd9Sstevel@tonic-gate 			return (EDEADLK);
1987c478bd9Sstevel@tonic-gate 		}
1997c478bd9Sstevel@tonic-gate 	mutex_exit(&swapinfo_lock);
2007c478bd9Sstevel@tonic-gate 	return (0);
2017c478bd9Sstevel@tonic-gate }
2027c478bd9Sstevel@tonic-gate 
2037c478bd9Sstevel@tonic-gate /*
2047c478bd9Sstevel@tonic-gate  * ufs_freeze
2057c478bd9Sstevel@tonic-gate  *	pend future accesses for current lock and desired lock
2067c478bd9Sstevel@tonic-gate  */
2077c478bd9Sstevel@tonic-gate void
ufs_freeze(struct ulockfs * ulp,struct lockfs * lockfsp)2087c478bd9Sstevel@tonic-gate ufs_freeze(struct ulockfs *ulp, struct lockfs *lockfsp)
2097c478bd9Sstevel@tonic-gate {
2107c478bd9Sstevel@tonic-gate 	/*
2117c478bd9Sstevel@tonic-gate 	 * set to new lock type
2127c478bd9Sstevel@tonic-gate 	 */
2137c478bd9Sstevel@tonic-gate 	ulp->ul_lockfs.lf_lock = lockfsp->lf_lock;
2147c478bd9Sstevel@tonic-gate 	ulp->ul_lockfs.lf_key = lockfsp->lf_key;
2157c478bd9Sstevel@tonic-gate 	ulp->ul_lockfs.lf_comlen = lockfsp->lf_comlen;
2167c478bd9Sstevel@tonic-gate 	ulp->ul_lockfs.lf_comment = lockfsp->lf_comment;
2177c478bd9Sstevel@tonic-gate 
2187c478bd9Sstevel@tonic-gate 	ulp->ul_fs_lock = (1 << ulp->ul_lockfs.lf_lock);
2197c478bd9Sstevel@tonic-gate }
2207c478bd9Sstevel@tonic-gate 
/*
 * All callers of ufs_quiesce() atomically increment ufs_quiesce_pend before
 * starting ufs_quiesce() protocol and decrement it only when a file system no
 * longer has to be in quiescent state. This allows ufs_pageio() to detect
 * that another thread wants to quiesce a file system. See more comments in
 * ufs_pageio().  ufs_quiesce() asserts this count is non-zero on entry.
 */
ulong_t ufs_quiesce_pend = 0;
229bc69f433Saguzovsk 
/*
 * ufs_quiesce
 *	wait for outstanding accesses to finish
 *	Caller is expected to hold ulp->ul_lock (it is the mutex handed
 *	to cv_reltimedwait_sig() below) and to have incremented
 *	ufs_quiesce_pend beforehand (asserted).  Returns 0 once the file
 *	system is quiescent, or EINTR if interrupted by a signal.
 */
int
ufs_quiesce(struct ulockfs *ulp)
{
	int error = 0;
	ulockfs_info_t *head;
	ulockfs_info_t *info;
	klwp_t *lwp = ttolwp(curthread);

	/*
	 * look up this thread's recursive-VOP record for ulp, if any;
	 * a fallocate thread (ULOCK_INFO_FALLOCATE) is treated
	 * specially in the wait loop below
	 */
	head = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
	SEARCH_ULOCKFSP(head, ulp, info);

	/*
	 * We have to keep /proc away from stopping us after we applied
	 * the softlock but before we got a chance to clear it again.
	 * prstop() may pagefault and become stuck on the softlock still
	 * pending.
	 */
	if (lwp != NULL)
		lwp->lwp_nostop++;

	/*
	 * Set a softlock to suspend future ufs_vnops so that
	 * this lockfs request will not be starved
	 */
	ULOCKFS_SET_SLOCK(ulp);
	ASSERT(ufs_quiesce_pend);

	/* check if there is any outstanding ufs vnodeops calls */
	while (ulp->ul_vnops_cnt || ulp->ul_falloc_cnt) {
		/*
		 * use timed version of cv_wait_sig() to make sure we don't
		 * miss a wake up call from ufs_pageio() when it doesn't use
		 * ul_lock.
		 *
		 * when a fallocate thread comes in, the only way it returns
		 * from this function is if there are no other vnode operations
		 * going on (remember fallocate threads are tracked using
		 * ul_falloc_cnt not ul_vnops_cnt), and another fallocate thread
		 * hasn't already grabbed the fs write lock.
		 */
		if (info && (info->flags & ULOCK_INFO_FALLOCATE)) {
			if (!ulp->ul_vnops_cnt && !ULOCKFS_IS_FWLOCK(ulp))
				goto out;
		}
		if (!cv_reltimedwait_sig(&ulp->ul_cv, &ulp->ul_lock, hz,
		    TR_CLOCK_TICK)) {
			/* signal received while waiting */
			error = EINTR;
			goto out;
		}
	}

out:
	/*
	 * unlock the soft lock
	 */
	ULOCKFS_CLR_SLOCK(ulp);

	/* re-allow /proc to stop us now that the softlock is clear */
	if (lwp != NULL)
		lwp->lwp_nostop--;

	return (error);
}
296303bf60bSsdebnath 
2977c478bd9Sstevel@tonic-gate /*
2987c478bd9Sstevel@tonic-gate  * ufs_flush_inode
2997c478bd9Sstevel@tonic-gate  */
3007c478bd9Sstevel@tonic-gate int
ufs_flush_inode(struct inode * ip,void * arg)3017c478bd9Sstevel@tonic-gate ufs_flush_inode(struct inode *ip, void *arg)
3027c478bd9Sstevel@tonic-gate {
3037c478bd9Sstevel@tonic-gate 	int	error;
3047c478bd9Sstevel@tonic-gate 	int	saverror	= 0;
3057c478bd9Sstevel@tonic-gate 
3067c478bd9Sstevel@tonic-gate 	/*
3077c478bd9Sstevel@tonic-gate 	 * wrong file system; keep looking
3087c478bd9Sstevel@tonic-gate 	 */
3097c478bd9Sstevel@tonic-gate 	if (ip->i_ufsvfs != (struct ufsvfs *)arg)
3107c478bd9Sstevel@tonic-gate 		return (0);
3117c478bd9Sstevel@tonic-gate 
3127c478bd9Sstevel@tonic-gate 	/*
3137c478bd9Sstevel@tonic-gate 	 * asynchronously push all the dirty pages
3147c478bd9Sstevel@tonic-gate 	 */
3157c478bd9Sstevel@tonic-gate 	if (((error = TRANS_SYNCIP(ip, B_ASYNC, 0, TOP_SYNCIP_FLUSHI)) != 0) &&
3167c478bd9Sstevel@tonic-gate 	    (error != EAGAIN))
3177c478bd9Sstevel@tonic-gate 		saverror = error;
3187c478bd9Sstevel@tonic-gate 	/*
3197c478bd9Sstevel@tonic-gate 	 * wait for io and discard all mappings
3207c478bd9Sstevel@tonic-gate 	 */
3217c478bd9Sstevel@tonic-gate 	if (error = TRANS_SYNCIP(ip, B_INVAL, 0, TOP_SYNCIP_FLUSHI))
3227c478bd9Sstevel@tonic-gate 		saverror = error;
3237c478bd9Sstevel@tonic-gate 
3247c478bd9Sstevel@tonic-gate 	if (ITOV(ip)->v_type == VDIR) {
3257c478bd9Sstevel@tonic-gate 		dnlc_dir_purge(&ip->i_danchor);
3267c478bd9Sstevel@tonic-gate 	}
3277c478bd9Sstevel@tonic-gate 
3287c478bd9Sstevel@tonic-gate 	return (saverror);
3297c478bd9Sstevel@tonic-gate }
3307c478bd9Sstevel@tonic-gate 
/*
 * ufs_flush
 *	Flush everything that is currently dirty; this includes invalidating
 *	any mappings.  Caller must hold the vfs lock (asserted).  Returns
 *	the last error recorded while flushing, or 0 on success.
 */
int
ufs_flush(struct vfs *vfsp)
{
	int		error;
	int		saverror = 0;
	struct ufsvfs	*ufsvfsp	= (struct ufsvfs *)vfsp->vfs_data;
	struct fs	*fs		= ufsvfsp->vfs_fs;
	int		tdontblock = 0;	/* caller already had T_DONTBLOCK */

	ASSERT(vfs_lock_held(vfsp));

	/*
	 * purge dnlc
	 */
	(void) dnlc_purge_vfsp(vfsp, 0);

	/*
	 * drain the delete and idle threads
	 */
	ufs_delete_drain(vfsp, 0, 0);
	ufs_idle_drain(vfsp);

	/*
	 * flush and invalidate quota records
	 */
	(void) qsync(ufsvfsp);

	/*
	 * flush w/invalidate the inodes for vfsp
	 */
	if (error = ufs_scan_inodes(0, ufs_flush_inode, ufsvfsp, ufsvfsp))
		saverror = error;

	/*
	 * synchronously flush superblock and summary info
	 */
	if (fs->fs_ronly == 0 && fs->fs_fmod) {
		fs->fs_fmod = 0;
		TRANS_SBUPDATE(ufsvfsp, vfsp, TOP_SBUPDATE_FLUSH);
	}
	/*
	 * flush w/invalidate block device pages and buf cache
	 */
	if ((error = VOP_PUTPAGE(common_specvp(ufsvfsp->vfs_devvp),
	    (offset_t)0, 0, B_INVAL, CRED(), NULL)) > 0)
		saverror = error;

	(void) bflush((dev_t)vfsp->vfs_dev);
	(void) bfinval((dev_t)vfsp->vfs_dev, 0);

	/*
	 * drain the delete and idle threads again
	 */
	ufs_delete_drain(vfsp, 0, 0);
	ufs_idle_drain(vfsp);

	/*
	 * play with the clean flag
	 */
	if (saverror == 0)
		ufs_checkclean(vfsp);

	/*
	 * Flush any outstanding transactions and roll the log
	 * only if we are supposed to do, i.e. LDL_NOROLL not set.
	 * We can not simply check for fs_ronly here since fsck also may
	 * use this code to roll the log on a read-only filesystem, e.g.
	 * root during early stages of boot, if other then a sanity check is
	 * done, it will clear LDL_NOROLL before.
	 * In addition we assert that the deltamap does not contain any deltas
	 * in case LDL_NOROLL is set since this is not supposed to happen.
	 */
	if (TRANS_ISTRANS(ufsvfsp)) {
		ml_unit_t	*ul	= ufsvfsp->vfs_log;
		mt_map_t	*mtm	= ul->un_deltamap;

		if (ul->un_flags & LDL_NOROLL) {
			ASSERT(mtm->mtm_nme == 0);
		} else {
			/*
			 * Do not set T_DONTBLOCK if there is a
			 * transaction opened by caller.
			 */
			if (curthread->t_flag & T_DONTBLOCK)
				tdontblock = 1;
			else
				curthread->t_flag |= T_DONTBLOCK;

			TRANS_BEGIN_SYNC(ufsvfsp, TOP_COMMIT_FLUSH,
			    TOP_COMMIT_SIZE, error);

			if (!error) {
				TRANS_END_SYNC(ufsvfsp, saverror,
				    TOP_COMMIT_FLUSH, TOP_COMMIT_SIZE);
			}

			/*
			 * only clear T_DONTBLOCK if we set it above;
			 * the caller's open transaction keeps its flag
			 */
			if (tdontblock == 0)
				curthread->t_flag &= ~T_DONTBLOCK;

			logmap_roll_dev(ufsvfsp->vfs_log);
		}
	}

	return (saverror);
}
4417c478bd9Sstevel@tonic-gate 
4427c478bd9Sstevel@tonic-gate /*
4437c478bd9Sstevel@tonic-gate  * ufs_thaw_wlock
4447c478bd9Sstevel@tonic-gate  *	special processing when thawing down to wlock
4457c478bd9Sstevel@tonic-gate  */
4467c478bd9Sstevel@tonic-gate static int
ufs_thaw_wlock(struct inode * ip,void * arg)4477c478bd9Sstevel@tonic-gate ufs_thaw_wlock(struct inode *ip, void *arg)
4487c478bd9Sstevel@tonic-gate {
4497c478bd9Sstevel@tonic-gate 	/*
4507c478bd9Sstevel@tonic-gate 	 * wrong file system; keep looking
4517c478bd9Sstevel@tonic-gate 	 */
4527c478bd9Sstevel@tonic-gate 	if (ip->i_ufsvfs != (struct ufsvfs *)arg)
4537c478bd9Sstevel@tonic-gate 		return (0);
4547c478bd9Sstevel@tonic-gate 
4557c478bd9Sstevel@tonic-gate 	/*
4567c478bd9Sstevel@tonic-gate 	 * iupdat refuses to clear flags if the fs is read only.  The fs
4577c478bd9Sstevel@tonic-gate 	 * may become read/write during the lock and we wouldn't want
4587c478bd9Sstevel@tonic-gate 	 * these inodes being written to disk.  So clear the flags.
4597c478bd9Sstevel@tonic-gate 	 */
4607c478bd9Sstevel@tonic-gate 	rw_enter(&ip->i_contents, RW_WRITER);
4617c478bd9Sstevel@tonic-gate 	ip->i_flag &= ~(IMOD|IMODACC|IACC|IUPD|ICHG|IATTCHG);
4627c478bd9Sstevel@tonic-gate 	rw_exit(&ip->i_contents);
4637c478bd9Sstevel@tonic-gate 
4647c478bd9Sstevel@tonic-gate 	/*
4657c478bd9Sstevel@tonic-gate 	 * pages are mlocked -- fail wlock
4667c478bd9Sstevel@tonic-gate 	 */
4677c478bd9Sstevel@tonic-gate 	if (ITOV(ip)->v_type != VCHR && vn_has_cached_data(ITOV(ip)))
4687c478bd9Sstevel@tonic-gate 		return (EBUSY);
4697c478bd9Sstevel@tonic-gate 
4707c478bd9Sstevel@tonic-gate 	return (0);
4717c478bd9Sstevel@tonic-gate }
4727c478bd9Sstevel@tonic-gate 
4737c478bd9Sstevel@tonic-gate /*
4747c478bd9Sstevel@tonic-gate  * ufs_thaw_hlock
4757c478bd9Sstevel@tonic-gate  *	special processing when thawing down to hlock or elock
4767c478bd9Sstevel@tonic-gate  */
4777c478bd9Sstevel@tonic-gate static int
ufs_thaw_hlock(struct inode * ip,void * arg)4787c478bd9Sstevel@tonic-gate ufs_thaw_hlock(struct inode *ip, void *arg)
4797c478bd9Sstevel@tonic-gate {
4807c478bd9Sstevel@tonic-gate 	struct vnode	*vp	= ITOV(ip);
4817c478bd9Sstevel@tonic-gate 
4827c478bd9Sstevel@tonic-gate 	/*
4837c478bd9Sstevel@tonic-gate 	 * wrong file system; keep looking
4847c478bd9Sstevel@tonic-gate 	 */
4857c478bd9Sstevel@tonic-gate 	if (ip->i_ufsvfs != (struct ufsvfs *)arg)
4867c478bd9Sstevel@tonic-gate 		return (0);
4877c478bd9Sstevel@tonic-gate 
4887c478bd9Sstevel@tonic-gate 	/*
4897c478bd9Sstevel@tonic-gate 	 * blow away all pages - even if they are mlocked
4907c478bd9Sstevel@tonic-gate 	 */
4917c478bd9Sstevel@tonic-gate 	do {
4927c478bd9Sstevel@tonic-gate 		(void) TRANS_SYNCIP(ip, B_INVAL | B_FORCE, 0, TOP_SYNCIP_HLOCK);
4937c478bd9Sstevel@tonic-gate 	} while ((vp->v_type != VCHR) && vn_has_cached_data(vp));
4947c478bd9Sstevel@tonic-gate 	rw_enter(&ip->i_contents, RW_WRITER);
4957c478bd9Sstevel@tonic-gate 	ip->i_flag &= ~(IMOD|IMODACC|IACC|IUPD|ICHG|IATTCHG);
4967c478bd9Sstevel@tonic-gate 	rw_exit(&ip->i_contents);
4977c478bd9Sstevel@tonic-gate 
4987c478bd9Sstevel@tonic-gate 	return (0);
4997c478bd9Sstevel@tonic-gate }
5007c478bd9Sstevel@tonic-gate 
/*
 * ufs_thaw
 *	thaw file system lock down to current value
 *	Returns 0 or an errno from flushing/invalidation; waiters on
 *	ul_cv are broadcast in either case before returning.
 */
int
ufs_thaw(struct vfs *vfsp, struct ufsvfs *ufsvfsp, struct ulockfs *ulp)
{
	int		error	= 0;
	int		noidel	= (int)(ulp->ul_flag & ULOCKFS_NOIDEL);

	/*
	 * if wlock or hlock or elock
	 */
	if (ULOCKFS_IS_WLOCK(ulp) || ULOCKFS_IS_HLOCK(ulp) ||
	    ULOCKFS_IS_ELOCK(ulp)) {

		/*
		 * don't keep access times
		 * don't free deleted files
		 * if superblock writes are allowed, limit them to me for now
		 */
		ulp->ul_flag |= (ULOCKFS_NOIACC|ULOCKFS_NOIDEL);
		if (ulp->ul_sbowner != (kthread_id_t)-1)
			ulp->ul_sbowner = curthread;

		/*
		 * wait for writes for deleted files and superblock updates
		 */
		(void) ufs_flush(vfsp);

		/*
		 * now make sure the quota file is up-to-date
		 *	expensive; but effective
		 */
		error = ufs_flush(vfsp);
		/*
		 * no one can write the superblock
		 */
		ulp->ul_sbowner = (kthread_id_t)-1;

		/*
		 * special processing for wlock/hlock/elock
		 */
		if (ULOCKFS_IS_WLOCK(ulp)) {
			/* wlock must fail if anything is still dirty/busy */
			if (error)
				goto errout;
			error = bfinval(ufsvfsp->vfs_dev, 0);
			if (error)
				goto errout;
			error = ufs_scan_inodes(0, ufs_thaw_wlock,
			    (void *)ufsvfsp, ufsvfsp);
			if (error)
				goto errout;
		}
		if (ULOCKFS_IS_HLOCK(ulp) || ULOCKFS_IS_ELOCK(ulp)) {
			/* hlock/elock succeed regardless; discard everything */
			error = 0;
			(void) ufs_scan_inodes(0, ufs_thaw_hlock,
			    (void *)ufsvfsp, ufsvfsp);
			(void) bfinval(ufsvfsp->vfs_dev, 1);
		}
	} else {

		/*
		 * okay to keep access times
		 * okay to free deleted files
		 * okay to write the superblock
		 */
		ulp->ul_flag &= ~(ULOCKFS_NOIACC|ULOCKFS_NOIDEL);
		ulp->ul_sbowner = NULL;

		/*
		 * flush in case deleted files are in memory
		 */
		if (noidel) {
			if (error = ufs_flush(vfsp))
				goto errout;
		}
	}

errout:
	cv_broadcast(&ulp->ul_cv);
	return (error);
}
5847c478bd9Sstevel@tonic-gate 
5857c478bd9Sstevel@tonic-gate /*
5867c478bd9Sstevel@tonic-gate  * ufs_reconcile_fs
5877c478bd9Sstevel@tonic-gate  *	reconcile incore superblock with ondisk superblock
5887c478bd9Sstevel@tonic-gate  */
5897c478bd9Sstevel@tonic-gate int
ufs_reconcile_fs(struct vfs * vfsp,struct ufsvfs * ufsvfsp,int errlck)5907c478bd9Sstevel@tonic-gate ufs_reconcile_fs(struct vfs *vfsp, struct ufsvfs *ufsvfsp, int errlck)
5917c478bd9Sstevel@tonic-gate {
5927c478bd9Sstevel@tonic-gate 	struct fs	*mfs; 	/* in-memory superblock */
5937c478bd9Sstevel@tonic-gate 	struct fs	*dfs;	/* on-disk   superblock */
5947c478bd9Sstevel@tonic-gate 	struct buf	*bp;	/* on-disk   superblock buf */
5957c478bd9Sstevel@tonic-gate 	int		 needs_unlock;
5967c478bd9Sstevel@tonic-gate 	char		 finished_fsclean;
5977c478bd9Sstevel@tonic-gate 
5987c478bd9Sstevel@tonic-gate 	mfs = ufsvfsp->vfs_fs;
5997c478bd9Sstevel@tonic-gate 
6007c478bd9Sstevel@tonic-gate 	/*
6017c478bd9Sstevel@tonic-gate 	 * get the on-disk copy of the superblock
6027c478bd9Sstevel@tonic-gate 	 */
6037c478bd9Sstevel@tonic-gate 	bp = UFS_BREAD(ufsvfsp, vfsp->vfs_dev, SBLOCK, SBSIZE);
6047c478bd9Sstevel@tonic-gate 	bp->b_flags |= (B_STALE|B_AGE);
6057c478bd9Sstevel@tonic-gate 	if (bp->b_flags & B_ERROR) {
6067c478bd9Sstevel@tonic-gate 		brelse(bp);
6077c478bd9Sstevel@tonic-gate 		return (EIO);
6087c478bd9Sstevel@tonic-gate 	}
6097c478bd9Sstevel@tonic-gate 	dfs = bp->b_un.b_fs;
6107c478bd9Sstevel@tonic-gate 
6117c478bd9Sstevel@tonic-gate 	/* error locks may only unlock after the fs has been made consistent */
6127c478bd9Sstevel@tonic-gate 	if (errlck == UN_ERRLCK) {
6137c478bd9Sstevel@tonic-gate 		if (dfs->fs_clean == FSFIX) {	/* being repaired */
6147c478bd9Sstevel@tonic-gate 			brelse(bp);
6157c478bd9Sstevel@tonic-gate 			return (EAGAIN);
6167c478bd9Sstevel@tonic-gate 		}
6177c478bd9Sstevel@tonic-gate 		/* repair not yet started? */
6187c478bd9Sstevel@tonic-gate 		finished_fsclean = TRANS_ISTRANS(ufsvfsp)? FSLOG: FSCLEAN;
6197c478bd9Sstevel@tonic-gate 		if (dfs->fs_clean != finished_fsclean) {
6207c478bd9Sstevel@tonic-gate 			brelse(bp);
6217c478bd9Sstevel@tonic-gate 			return (EBUSY);
6227c478bd9Sstevel@tonic-gate 		}
6237c478bd9Sstevel@tonic-gate 	}
6247c478bd9Sstevel@tonic-gate 
6257c478bd9Sstevel@tonic-gate 	/*
6267c478bd9Sstevel@tonic-gate 	 * if superblock has changed too much, abort
6277c478bd9Sstevel@tonic-gate 	 */
6287c478bd9Sstevel@tonic-gate 	if ((mfs->fs_sblkno		!= dfs->fs_sblkno) ||
6297c478bd9Sstevel@tonic-gate 	    (mfs->fs_cblkno		!= dfs->fs_cblkno) ||
6307c478bd9Sstevel@tonic-gate 	    (mfs->fs_iblkno		!= dfs->fs_iblkno) ||
6317c478bd9Sstevel@tonic-gate 	    (mfs->fs_dblkno		!= dfs->fs_dblkno) ||
6327c478bd9Sstevel@tonic-gate 	    (mfs->fs_cgoffset		!= dfs->fs_cgoffset) ||
6337c478bd9Sstevel@tonic-gate 	    (mfs->fs_cgmask		!= dfs->fs_cgmask) ||
6347c478bd9Sstevel@tonic-gate 	    (mfs->fs_bsize		!= dfs->fs_bsize) ||
6357c478bd9Sstevel@tonic-gate 	    (mfs->fs_fsize		!= dfs->fs_fsize) ||
6367c478bd9Sstevel@tonic-gate 	    (mfs->fs_frag		!= dfs->fs_frag) ||
6377c478bd9Sstevel@tonic-gate 	    (mfs->fs_bmask		!= dfs->fs_bmask) ||
6387c478bd9Sstevel@tonic-gate 	    (mfs->fs_fmask		!= dfs->fs_fmask) ||
6397c478bd9Sstevel@tonic-gate 	    (mfs->fs_bshift		!= dfs->fs_bshift) ||
6407c478bd9Sstevel@tonic-gate 	    (mfs->fs_fshift		!= dfs->fs_fshift) ||
6417c478bd9Sstevel@tonic-gate 	    (mfs->fs_fragshift		!= dfs->fs_fragshift) ||
6427c478bd9Sstevel@tonic-gate 	    (mfs->fs_fsbtodb		!= dfs->fs_fsbtodb) ||
6437c478bd9Sstevel@tonic-gate 	    (mfs->fs_sbsize		!= dfs->fs_sbsize) ||
6447c478bd9Sstevel@tonic-gate 	    (mfs->fs_nindir		!= dfs->fs_nindir) ||
6457c478bd9Sstevel@tonic-gate 	    (mfs->fs_nspf		!= dfs->fs_nspf) ||
6467c478bd9Sstevel@tonic-gate 	    (mfs->fs_trackskew		!= dfs->fs_trackskew) ||
6477c478bd9Sstevel@tonic-gate 	    (mfs->fs_cgsize		!= dfs->fs_cgsize) ||
6487c478bd9Sstevel@tonic-gate 	    (mfs->fs_ntrak		!= dfs->fs_ntrak) ||
6497c478bd9Sstevel@tonic-gate 	    (mfs->fs_nsect		!= dfs->fs_nsect) ||
6507c478bd9Sstevel@tonic-gate 	    (mfs->fs_spc		!= dfs->fs_spc) ||
6517c478bd9Sstevel@tonic-gate 	    (mfs->fs_cpg		!= dfs->fs_cpg) ||
6527c478bd9Sstevel@tonic-gate 	    (mfs->fs_ipg		!= dfs->fs_ipg) ||
6537c478bd9Sstevel@tonic-gate 	    (mfs->fs_fpg		!= dfs->fs_fpg) ||
6547c478bd9Sstevel@tonic-gate 	    (mfs->fs_postblformat	!= dfs->fs_postblformat) ||
6557c478bd9Sstevel@tonic-gate 	    (mfs->fs_magic		!= dfs->fs_magic)) {
6567c478bd9Sstevel@tonic-gate 		brelse(bp);
6577c478bd9Sstevel@tonic-gate 		return (EACCES);
6587c478bd9Sstevel@tonic-gate 	}
6597c478bd9Sstevel@tonic-gate 	if (dfs->fs_clean == FSBAD || FSOKAY != dfs->fs_state + dfs->fs_time)
6607c478bd9Sstevel@tonic-gate 		if (mfs->fs_clean == FSLOG) {
6617c478bd9Sstevel@tonic-gate 			brelse(bp);
6627c478bd9Sstevel@tonic-gate 			return (EACCES);
6637c478bd9Sstevel@tonic-gate 		}
6647c478bd9Sstevel@tonic-gate 
6657c478bd9Sstevel@tonic-gate 	/*
6667c478bd9Sstevel@tonic-gate 	 * get new summary info
6677c478bd9Sstevel@tonic-gate 	 */
6687c478bd9Sstevel@tonic-gate 	if (ufs_getsummaryinfo(vfsp->vfs_dev, ufsvfsp, dfs)) {
6697c478bd9Sstevel@tonic-gate 		brelse(bp);
6707c478bd9Sstevel@tonic-gate 		return (EIO);
6717c478bd9Sstevel@tonic-gate 	}
6727c478bd9Sstevel@tonic-gate 
6737c478bd9Sstevel@tonic-gate 	/*
6747c478bd9Sstevel@tonic-gate 	 * release old summary info and update in-memory superblock
6757c478bd9Sstevel@tonic-gate 	 */
6767c478bd9Sstevel@tonic-gate 	kmem_free(mfs->fs_u.fs_csp, mfs->fs_cssize);
6777c478bd9Sstevel@tonic-gate 	mfs->fs_u.fs_csp = dfs->fs_u.fs_csp;	/* Only entry 0 used */
6787c478bd9Sstevel@tonic-gate 
6797c478bd9Sstevel@tonic-gate 	/*
6807c478bd9Sstevel@tonic-gate 	 * update fields allowed to change
6817c478bd9Sstevel@tonic-gate 	 */
6827c478bd9Sstevel@tonic-gate 	mfs->fs_size		= dfs->fs_size;
6837c478bd9Sstevel@tonic-gate 	mfs->fs_dsize		= dfs->fs_dsize;
6847c478bd9Sstevel@tonic-gate 	mfs->fs_ncg		= dfs->fs_ncg;
6857c478bd9Sstevel@tonic-gate 	mfs->fs_minfree		= dfs->fs_minfree;
6867c478bd9Sstevel@tonic-gate 	mfs->fs_rotdelay	= dfs->fs_rotdelay;
6877c478bd9Sstevel@tonic-gate 	mfs->fs_rps		= dfs->fs_rps;
6887c478bd9Sstevel@tonic-gate 	mfs->fs_maxcontig	= dfs->fs_maxcontig;
6897c478bd9Sstevel@tonic-gate 	mfs->fs_maxbpg		= dfs->fs_maxbpg;
6907c478bd9Sstevel@tonic-gate 	mfs->fs_csmask		= dfs->fs_csmask;
6917c478bd9Sstevel@tonic-gate 	mfs->fs_csshift		= dfs->fs_csshift;
6927c478bd9Sstevel@tonic-gate 	mfs->fs_optim		= dfs->fs_optim;
6937c478bd9Sstevel@tonic-gate 	mfs->fs_csaddr		= dfs->fs_csaddr;
6947c478bd9Sstevel@tonic-gate 	mfs->fs_cssize		= dfs->fs_cssize;
6957c478bd9Sstevel@tonic-gate 	mfs->fs_ncyl		= dfs->fs_ncyl;
6967c478bd9Sstevel@tonic-gate 	mfs->fs_cstotal		= dfs->fs_cstotal;
6977c478bd9Sstevel@tonic-gate 	mfs->fs_reclaim		= dfs->fs_reclaim;
6987c478bd9Sstevel@tonic-gate 
6997c478bd9Sstevel@tonic-gate 	if (mfs->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
7007c478bd9Sstevel@tonic-gate 		mfs->fs_reclaim &= ~FS_RECLAIM;
7017c478bd9Sstevel@tonic-gate 		mfs->fs_reclaim |=  FS_RECLAIMING;
7027c478bd9Sstevel@tonic-gate 		ufs_thread_start(&ufsvfsp->vfs_reclaim,
70380d34432Sfrankho 		    ufs_thread_reclaim, vfsp);
7047c478bd9Sstevel@tonic-gate 	}
7057c478bd9Sstevel@tonic-gate 
7067c478bd9Sstevel@tonic-gate 	/* XXX What to do about sparecon? */
7077c478bd9Sstevel@tonic-gate 
7087c478bd9Sstevel@tonic-gate 	/* XXX need to copy volume label */
7097c478bd9Sstevel@tonic-gate 
7107c478bd9Sstevel@tonic-gate 	/*
7117c478bd9Sstevel@tonic-gate 	 * ondisk clean flag overrides inmemory clean flag iff == FSBAD
7127c478bd9Sstevel@tonic-gate 	 * or if error-locked and ondisk is now clean
7137c478bd9Sstevel@tonic-gate 	 */
7147c478bd9Sstevel@tonic-gate 	needs_unlock = !MUTEX_HELD(&ufsvfsp->vfs_lock);
7157c478bd9Sstevel@tonic-gate 	if (needs_unlock)
7167c478bd9Sstevel@tonic-gate 		mutex_enter(&ufsvfsp->vfs_lock);
7177c478bd9Sstevel@tonic-gate 
7187c478bd9Sstevel@tonic-gate 	if (errlck == UN_ERRLCK) {
7197c478bd9Sstevel@tonic-gate 		if (finished_fsclean == dfs->fs_clean)
7207c478bd9Sstevel@tonic-gate 			mfs->fs_clean = finished_fsclean;
7217c478bd9Sstevel@tonic-gate 		else
7227c478bd9Sstevel@tonic-gate 			mfs->fs_clean = FSBAD;
7237c478bd9Sstevel@tonic-gate 		mfs->fs_state = FSOKAY - dfs->fs_time;
7247c478bd9Sstevel@tonic-gate 	}
7257c478bd9Sstevel@tonic-gate 
7267c478bd9Sstevel@tonic-gate 	if (FSOKAY != dfs->fs_state + dfs->fs_time ||
7277c478bd9Sstevel@tonic-gate 	    (dfs->fs_clean == FSBAD))
7287c478bd9Sstevel@tonic-gate 		mfs->fs_clean = FSBAD;
7297c478bd9Sstevel@tonic-gate 
7307c478bd9Sstevel@tonic-gate 	if (needs_unlock)
7317c478bd9Sstevel@tonic-gate 		mutex_exit(&ufsvfsp->vfs_lock);
7327c478bd9Sstevel@tonic-gate 
7337c478bd9Sstevel@tonic-gate 	brelse(bp);
7347c478bd9Sstevel@tonic-gate 
7357c478bd9Sstevel@tonic-gate 	return (0);
7367c478bd9Sstevel@tonic-gate }
7377c478bd9Sstevel@tonic-gate 
7387c478bd9Sstevel@tonic-gate /*
7397c478bd9Sstevel@tonic-gate  * ufs_reconcile_inode
7407c478bd9Sstevel@tonic-gate  *	reconcile ondisk inode with incore inode
7417c478bd9Sstevel@tonic-gate  */
7427c478bd9Sstevel@tonic-gate static int
ufs_reconcile_inode(struct inode * ip,void * arg)7437c478bd9Sstevel@tonic-gate ufs_reconcile_inode(struct inode *ip, void *arg)
7447c478bd9Sstevel@tonic-gate {
7457c478bd9Sstevel@tonic-gate 	int		i;
7467c478bd9Sstevel@tonic-gate 	int		ndaddr;
7477c478bd9Sstevel@tonic-gate 	int		niaddr;
7487c478bd9Sstevel@tonic-gate 	struct dinode	*dp;		/* ondisk inode */
7497c478bd9Sstevel@tonic-gate 	struct buf	*bp	= NULL;
7507c478bd9Sstevel@tonic-gate 	uid_t		d_uid;
7517c478bd9Sstevel@tonic-gate 	gid_t		d_gid;
7527c478bd9Sstevel@tonic-gate 	int		error = 0;
7537c478bd9Sstevel@tonic-gate 	struct fs	*fs;
7547c478bd9Sstevel@tonic-gate 
7557c478bd9Sstevel@tonic-gate 	/*
7567c478bd9Sstevel@tonic-gate 	 * not an inode we care about
7577c478bd9Sstevel@tonic-gate 	 */
7587c478bd9Sstevel@tonic-gate 	if (ip->i_ufsvfs != (struct ufsvfs *)arg)
7597c478bd9Sstevel@tonic-gate 		return (0);
7607c478bd9Sstevel@tonic-gate 
7617c478bd9Sstevel@tonic-gate 	fs = ip->i_fs;
7627c478bd9Sstevel@tonic-gate 
7637c478bd9Sstevel@tonic-gate 	/*
7647c478bd9Sstevel@tonic-gate 	 * Inode reconciliation fails: we made the filesystem quiescent
7657c478bd9Sstevel@tonic-gate 	 * and we did a ufs_flush() before calling ufs_reconcile_inode()
7667c478bd9Sstevel@tonic-gate 	 * and thus the inode should not have been changed inbetween.
7677c478bd9Sstevel@tonic-gate 	 * Any discrepancies indicate a logic error and a pretty
7687c478bd9Sstevel@tonic-gate 	 * significant run-state inconsistency we should complain about.
7697c478bd9Sstevel@tonic-gate 	 */
7707c478bd9Sstevel@tonic-gate 	if (ip->i_flag & (IMOD|IMODACC|IACC|IUPD|ICHG|IATTCHG)) {
7717c478bd9Sstevel@tonic-gate 		cmn_err(CE_WARN, "%s: Inode reconciliation failed for"
7727c478bd9Sstevel@tonic-gate 		    "inode %llu", fs->fs_fsmnt, (u_longlong_t)ip->i_number);
7737c478bd9Sstevel@tonic-gate 		return (EINVAL);
7747c478bd9Sstevel@tonic-gate 	}
7757c478bd9Sstevel@tonic-gate 
7767c478bd9Sstevel@tonic-gate 	/*
7777c478bd9Sstevel@tonic-gate 	 * get the dinode
7787c478bd9Sstevel@tonic-gate 	 */
7797c478bd9Sstevel@tonic-gate 	bp = UFS_BREAD(ip->i_ufsvfs,
78080d34432Sfrankho 	    ip->i_dev, (daddr_t)fsbtodb(fs, itod(fs, ip->i_number)),
7817c478bd9Sstevel@tonic-gate 	    (int)fs->fs_bsize);
7827c478bd9Sstevel@tonic-gate 	if (bp->b_flags & B_ERROR) {
7837c478bd9Sstevel@tonic-gate 		brelse(bp);
7847c478bd9Sstevel@tonic-gate 		return (EIO);
7857c478bd9Sstevel@tonic-gate 	}
7867c478bd9Sstevel@tonic-gate 	dp  = bp->b_un.b_dino;
7877c478bd9Sstevel@tonic-gate 	dp += itoo(fs, ip->i_number);
7887c478bd9Sstevel@tonic-gate 
7897c478bd9Sstevel@tonic-gate 	/*
7907c478bd9Sstevel@tonic-gate 	 * handle Sun's implementation of EFT
7917c478bd9Sstevel@tonic-gate 	 */
7927c478bd9Sstevel@tonic-gate 	d_uid = (dp->di_suid == UID_LONG) ? dp->di_uid : (uid_t)dp->di_suid;
7937c478bd9Sstevel@tonic-gate 	d_gid = (dp->di_sgid == GID_LONG) ? dp->di_gid : (uid_t)dp->di_sgid;
7947c478bd9Sstevel@tonic-gate 
7957c478bd9Sstevel@tonic-gate 	rw_enter(&ip->i_contents, RW_WRITER);
7967c478bd9Sstevel@tonic-gate 
7977c478bd9Sstevel@tonic-gate 	/*
7987c478bd9Sstevel@tonic-gate 	 * some fields are not allowed to change
7997c478bd9Sstevel@tonic-gate 	 */
8007c478bd9Sstevel@tonic-gate 	if ((ip->i_mode  != dp->di_mode) ||
8017c478bd9Sstevel@tonic-gate 	    (ip->i_gen   != dp->di_gen) ||
8027c478bd9Sstevel@tonic-gate 	    (ip->i_uid   != d_uid) ||
8037c478bd9Sstevel@tonic-gate 	    (ip->i_gid   != d_gid)) {
8047c478bd9Sstevel@tonic-gate 		error = EACCES;
8057c478bd9Sstevel@tonic-gate 		goto out;
8067c478bd9Sstevel@tonic-gate 	}
8077c478bd9Sstevel@tonic-gate 
8087c478bd9Sstevel@tonic-gate 	/*
8097c478bd9Sstevel@tonic-gate 	 * and some are allowed to change
8107c478bd9Sstevel@tonic-gate 	 */
8117c478bd9Sstevel@tonic-gate 	ip->i_size		= dp->di_size;
8127c478bd9Sstevel@tonic-gate 	ip->i_ic.ic_flags	= dp->di_ic.ic_flags;
8137c478bd9Sstevel@tonic-gate 	ip->i_blocks		= dp->di_blocks;
8147c478bd9Sstevel@tonic-gate 	ip->i_nlink		= dp->di_nlink;
8157c478bd9Sstevel@tonic-gate 	if (ip->i_flag & IFASTSYMLNK) {
8167c478bd9Sstevel@tonic-gate 		ndaddr = 1;
8177c478bd9Sstevel@tonic-gate 		niaddr = 0;
8187c478bd9Sstevel@tonic-gate 	} else {
8197c478bd9Sstevel@tonic-gate 		ndaddr = NDADDR;
8207c478bd9Sstevel@tonic-gate 		niaddr = NIADDR;
8217c478bd9Sstevel@tonic-gate 	}
8227c478bd9Sstevel@tonic-gate 	for (i = 0; i < ndaddr; ++i)
8237c478bd9Sstevel@tonic-gate 		ip->i_db[i] = dp->di_db[i];
8247c478bd9Sstevel@tonic-gate 	for (i = 0; i < niaddr; ++i)
8257c478bd9Sstevel@tonic-gate 		ip->i_ib[i] = dp->di_ib[i];
8267c478bd9Sstevel@tonic-gate 
8277c478bd9Sstevel@tonic-gate out:
8287c478bd9Sstevel@tonic-gate 	rw_exit(&ip->i_contents);
8297c478bd9Sstevel@tonic-gate 	brelse(bp);
8307c478bd9Sstevel@tonic-gate 	return (error);
8317c478bd9Sstevel@tonic-gate }
8327c478bd9Sstevel@tonic-gate 
8337c478bd9Sstevel@tonic-gate /*
8347c478bd9Sstevel@tonic-gate  * ufs_reconcile
8357c478bd9Sstevel@tonic-gate  *	reconcile ondisk superblock/inodes with any incore
8367c478bd9Sstevel@tonic-gate  */
8377c478bd9Sstevel@tonic-gate static int
ufs_reconcile(struct vfs * vfsp,struct ufsvfs * ufsvfsp,int errlck)8387c478bd9Sstevel@tonic-gate ufs_reconcile(struct vfs *vfsp, struct ufsvfs *ufsvfsp, int errlck)
8397c478bd9Sstevel@tonic-gate {
8407c478bd9Sstevel@tonic-gate 	int	error = 0;
8417c478bd9Sstevel@tonic-gate 
8427c478bd9Sstevel@tonic-gate 	/*
8437c478bd9Sstevel@tonic-gate 	 * get rid of as much inmemory data as possible
8447c478bd9Sstevel@tonic-gate 	 */
8457c478bd9Sstevel@tonic-gate 	(void) ufs_flush(vfsp);
8467c478bd9Sstevel@tonic-gate 
8477c478bd9Sstevel@tonic-gate 	/*
8487c478bd9Sstevel@tonic-gate 	 * reconcile the superblock and inodes
8497c478bd9Sstevel@tonic-gate 	 */
8507c478bd9Sstevel@tonic-gate 	if (error = ufs_reconcile_fs(vfsp, ufsvfsp, errlck))
8517c478bd9Sstevel@tonic-gate 		return (error);
8527c478bd9Sstevel@tonic-gate 	if (error = ufs_scan_inodes(0, ufs_reconcile_inode, ufsvfsp, ufsvfsp))
8537c478bd9Sstevel@tonic-gate 		return (error);
8547c478bd9Sstevel@tonic-gate 	/*
8557c478bd9Sstevel@tonic-gate 	 * allocation blocks may be incorrect; get rid of them
8567c478bd9Sstevel@tonic-gate 	 */
8577c478bd9Sstevel@tonic-gate 	(void) ufs_flush(vfsp);
8587c478bd9Sstevel@tonic-gate 
8597c478bd9Sstevel@tonic-gate 	return (error);
8607c478bd9Sstevel@tonic-gate }
8617c478bd9Sstevel@tonic-gate 
8627c478bd9Sstevel@tonic-gate /*
8637c478bd9Sstevel@tonic-gate  * File system locking
8647c478bd9Sstevel@tonic-gate  */
8657c478bd9Sstevel@tonic-gate int
ufs_fiolfs(struct vnode * vp,struct lockfs * lockfsp,int from_log)8667c478bd9Sstevel@tonic-gate ufs_fiolfs(struct vnode *vp, struct lockfs *lockfsp, int from_log)
8677c478bd9Sstevel@tonic-gate {
8687c478bd9Sstevel@tonic-gate 	return (ufs__fiolfs(vp, lockfsp, /* from_user */ 1, from_log));
8697c478bd9Sstevel@tonic-gate }
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate /* kernel-internal interface, also used by fix-on-panic */
8727c478bd9Sstevel@tonic-gate int
ufs__fiolfs(struct vnode * vp,struct lockfs * lockfsp,int from_user,int from_log)8737c478bd9Sstevel@tonic-gate ufs__fiolfs(
8747c478bd9Sstevel@tonic-gate 	struct vnode *vp,
8757c478bd9Sstevel@tonic-gate 	struct lockfs *lockfsp,
8767c478bd9Sstevel@tonic-gate 	int from_user,
8777c478bd9Sstevel@tonic-gate 	int from_log)
8787c478bd9Sstevel@tonic-gate {
8797c478bd9Sstevel@tonic-gate 	struct ulockfs	*ulp;
8807c478bd9Sstevel@tonic-gate 	struct lockfs	lfs;
8817c478bd9Sstevel@tonic-gate 	int		error;
8827c478bd9Sstevel@tonic-gate 	struct vfs	*vfsp;
8837c478bd9Sstevel@tonic-gate 	struct ufsvfs	*ufsvfsp;
8847c478bd9Sstevel@tonic-gate 	int		 errlck		= NO_ERRLCK;
8857c478bd9Sstevel@tonic-gate 	int		 poll_events	= POLLPRI;
8867c478bd9Sstevel@tonic-gate 	extern struct pollhead ufs_pollhd;
887303bf60bSsdebnath 	ulockfs_info_t *head;
888303bf60bSsdebnath 	ulockfs_info_t *info;
88946ac4468Smishra 	int signal = 0;
8907c478bd9Sstevel@tonic-gate 
8917c478bd9Sstevel@tonic-gate 	/* check valid lock type */
8927c478bd9Sstevel@tonic-gate 	if (!lockfsp || lockfsp->lf_lock > LOCKFS_MAXLOCK)
8937c478bd9Sstevel@tonic-gate 		return (EINVAL);
8947c478bd9Sstevel@tonic-gate 
8957c478bd9Sstevel@tonic-gate 	if (!vp || !vp->v_vfsp || !vp->v_vfsp->vfs_data)
8967c478bd9Sstevel@tonic-gate 		return (EIO);
8977c478bd9Sstevel@tonic-gate 
89813237b7eSbatschul 	vfsp = vp->v_vfsp;
89913237b7eSbatschul 
90013237b7eSbatschul 	if (vfsp->vfs_flag & VFS_UNMOUNTED) /* has been unmounted */
90102ffed0eSjr 		return (EIO);
90202ffed0eSjr 
90302ffed0eSjr 	/* take the lock and check again */
90413237b7eSbatschul 	vfs_lock_wait(vfsp);
90513237b7eSbatschul 	if (vfsp->vfs_flag & VFS_UNMOUNTED) {
90613237b7eSbatschul 		vfs_unlock(vfsp);
90702ffed0eSjr 		return (EIO);
90802ffed0eSjr 	}
90902ffed0eSjr 
91013237b7eSbatschul 	/*
91113237b7eSbatschul 	 * Can't wlock or ro/elock fs with accounting or local swap file
91213237b7eSbatschul 	 * We need to check for this before we grab the ul_lock to avoid
91313237b7eSbatschul 	 * deadlocks with the accounting framework.
91413237b7eSbatschul 	 */
91513237b7eSbatschul 	if ((LOCKFS_IS_WLOCK(lockfsp) || LOCKFS_IS_ELOCK(lockfsp) ||
91613237b7eSbatschul 	    LOCKFS_IS_ROELOCK(lockfsp)) && !from_log) {
91713237b7eSbatschul 		if (ufs_checkaccton(vp) || ufs_checkswapon(vp)) {
91813237b7eSbatschul 			vfs_unlock(vfsp);
91913237b7eSbatschul 			return (EDEADLK);
92013237b7eSbatschul 		}
92113237b7eSbatschul 	}
92213237b7eSbatschul 
9237c478bd9Sstevel@tonic-gate 	ufsvfsp = (struct ufsvfs *)vfsp->vfs_data;
9247c478bd9Sstevel@tonic-gate 	ulp = &ufsvfsp->vfs_ulockfs;
925303bf60bSsdebnath 	head = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
926303bf60bSsdebnath 	SEARCH_ULOCKFSP(head, ulp, info);
927303bf60bSsdebnath 
9287c478bd9Sstevel@tonic-gate 	/*
9297c478bd9Sstevel@tonic-gate 	 * Suspend both the reclaim thread and the delete thread.
9307c478bd9Sstevel@tonic-gate 	 * This must be done outside the lockfs locking protocol.
9317c478bd9Sstevel@tonic-gate 	 */
9327c478bd9Sstevel@tonic-gate 	ufs_thread_suspend(&ufsvfsp->vfs_reclaim);
9337c478bd9Sstevel@tonic-gate 	ufs_thread_suspend(&ufsvfsp->vfs_delete);
9347c478bd9Sstevel@tonic-gate 
9357c478bd9Sstevel@tonic-gate 	mutex_enter(&ulp->ul_lock);
936*1a5e258fSJosef 'Jeff' Sipek 	atomic_inc_ulong(&ufs_quiesce_pend);
9377c478bd9Sstevel@tonic-gate 
9387c478bd9Sstevel@tonic-gate 	/*
9397c478bd9Sstevel@tonic-gate 	 * Quit if there is another lockfs request in progress
9407c478bd9Sstevel@tonic-gate 	 * that is waiting for existing ufs_vnops to complete.
9417c478bd9Sstevel@tonic-gate 	 */
9427c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_BUSY(ulp)) {
9437c478bd9Sstevel@tonic-gate 		error = EBUSY;
9447c478bd9Sstevel@tonic-gate 		goto errexit;
9457c478bd9Sstevel@tonic-gate 	}
9467c478bd9Sstevel@tonic-gate 
9477c478bd9Sstevel@tonic-gate 	/* cannot ulocked or downgrade a hard-lock */
9487c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_HLOCK(ulp)) {
9497c478bd9Sstevel@tonic-gate 		error = EIO;
9507c478bd9Sstevel@tonic-gate 		goto errexit;
9517c478bd9Sstevel@tonic-gate 	}
9527c478bd9Sstevel@tonic-gate 
9537c478bd9Sstevel@tonic-gate 	/* an error lock may be unlocked or relocked, only */
9547c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_ELOCK(ulp)) {
9557c478bd9Sstevel@tonic-gate 		if (!LOCKFS_IS_ULOCK(lockfsp) && !LOCKFS_IS_ELOCK(lockfsp)) {
9567c478bd9Sstevel@tonic-gate 			error = EBUSY;
9577c478bd9Sstevel@tonic-gate 			goto errexit;
9587c478bd9Sstevel@tonic-gate 		}
9597c478bd9Sstevel@tonic-gate 	}
9607c478bd9Sstevel@tonic-gate 
9617c478bd9Sstevel@tonic-gate 	/*
9627c478bd9Sstevel@tonic-gate 	 * a read-only error lock may only be upgraded to an
9637c478bd9Sstevel@tonic-gate 	 * error lock or hard lock
9647c478bd9Sstevel@tonic-gate 	 */
9657c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_ROELOCK(ulp)) {
9667c478bd9Sstevel@tonic-gate 		if (!LOCKFS_IS_HLOCK(lockfsp) && !LOCKFS_IS_ELOCK(lockfsp)) {
9677c478bd9Sstevel@tonic-gate 			error = EBUSY;
9687c478bd9Sstevel@tonic-gate 			goto errexit;
9697c478bd9Sstevel@tonic-gate 		}
9707c478bd9Sstevel@tonic-gate 	}
9717c478bd9Sstevel@tonic-gate 
9727c478bd9Sstevel@tonic-gate 	/*
9737c478bd9Sstevel@tonic-gate 	 * until read-only error locks are fully implemented
9747c478bd9Sstevel@tonic-gate 	 * just return EINVAL
9757c478bd9Sstevel@tonic-gate 	 */
9767c478bd9Sstevel@tonic-gate 	if (LOCKFS_IS_ROELOCK(lockfsp)) {
9777c478bd9Sstevel@tonic-gate 		error = EINVAL;
9787c478bd9Sstevel@tonic-gate 		goto errexit;
9797c478bd9Sstevel@tonic-gate 	}
9807c478bd9Sstevel@tonic-gate 
9817c478bd9Sstevel@tonic-gate 	/*
9827c478bd9Sstevel@tonic-gate 	 * an error lock may only be applied if the file system is
9837c478bd9Sstevel@tonic-gate 	 * unlocked or already error locked.
9847c478bd9Sstevel@tonic-gate 	 * (this is to prevent the case where a fs gets changed out from
9857c478bd9Sstevel@tonic-gate 	 * underneath a fs that is locked for backup,
9867c478bd9Sstevel@tonic-gate 	 * that is, name/delete/write-locked.)
9877c478bd9Sstevel@tonic-gate 	 */
9887c478bd9Sstevel@tonic-gate 	if ((!ULOCKFS_IS_ULOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp) &&
9897c478bd9Sstevel@tonic-gate 	    !ULOCKFS_IS_ROELOCK(ulp)) &&
9907c478bd9Sstevel@tonic-gate 	    (LOCKFS_IS_ELOCK(lockfsp) || LOCKFS_IS_ROELOCK(lockfsp))) {
9917c478bd9Sstevel@tonic-gate 		error = EBUSY;
9927c478bd9Sstevel@tonic-gate 		goto errexit;
9937c478bd9Sstevel@tonic-gate 	}
9947c478bd9Sstevel@tonic-gate 
9957c478bd9Sstevel@tonic-gate 	/* get and validate the input lockfs request */
9967c478bd9Sstevel@tonic-gate 	if (error = ufs_getlfd(lockfsp, &ulp->ul_lockfs))
9977c478bd9Sstevel@tonic-gate 		goto errexit;
9987c478bd9Sstevel@tonic-gate 
9997c478bd9Sstevel@tonic-gate 	/*
10007c478bd9Sstevel@tonic-gate 	 * save current ulockfs struct
10017c478bd9Sstevel@tonic-gate 	 */
10027c478bd9Sstevel@tonic-gate 	bcopy(&ulp->ul_lockfs, &lfs, sizeof (struct lockfs));
10037c478bd9Sstevel@tonic-gate 
10047c478bd9Sstevel@tonic-gate 	/*
10057c478bd9Sstevel@tonic-gate 	 * Freeze the file system (pend future accesses)
10067c478bd9Sstevel@tonic-gate 	 */
10077c478bd9Sstevel@tonic-gate 	ufs_freeze(ulp, lockfsp);
10087c478bd9Sstevel@tonic-gate 
10097c478bd9Sstevel@tonic-gate 	/*
10107c478bd9Sstevel@tonic-gate 	 * Set locking in progress because ufs_quiesce may free the
10117c478bd9Sstevel@tonic-gate 	 * ul_lock mutex.
10127c478bd9Sstevel@tonic-gate 	 */
10137c478bd9Sstevel@tonic-gate 	ULOCKFS_SET_BUSY(ulp);
10147c478bd9Sstevel@tonic-gate 	/* update the ioctl copy */
10157c478bd9Sstevel@tonic-gate 	LOCKFS_SET_BUSY(&ulp->ul_lockfs);
10167c478bd9Sstevel@tonic-gate 
1017303bf60bSsdebnath 	/*
1018303bf60bSsdebnath 	 * We  need to unset FWLOCK status before we call ufs_quiesce
1019303bf60bSsdebnath 	 * so that the thread doesnt get suspended. We do this only if
1020303bf60bSsdebnath 	 * this (fallocate) thread requested an unlock operation.
1021303bf60bSsdebnath 	 */
1022303bf60bSsdebnath 	if (info && (info->flags & ULOCK_INFO_FALLOCATE)) {
1023303bf60bSsdebnath 		if (!ULOCKFS_IS_WLOCK(ulp))
1024303bf60bSsdebnath 			ULOCKFS_CLR_FWLOCK(ulp);
1025303bf60bSsdebnath 	}
1026303bf60bSsdebnath 
10277c478bd9Sstevel@tonic-gate 	/*
10287c478bd9Sstevel@tonic-gate 	 * Quiesce (wait for outstanding accesses to finish)
10297c478bd9Sstevel@tonic-gate 	 */
103046ac4468Smishra 	if (error = ufs_quiesce(ulp)) {
103146ac4468Smishra 		/*
103246ac4468Smishra 		 * Interrupted due to signal. There could still be
103346ac4468Smishra 		 * pending vnops.
103446ac4468Smishra 		 */
103546ac4468Smishra 		signal = 1;
103646ac4468Smishra 
103746ac4468Smishra 		/*
103846ac4468Smishra 		 * We do broadcast because lock-status
103946ac4468Smishra 		 * could be reverted to old status.
104046ac4468Smishra 		 */
104146ac4468Smishra 		cv_broadcast(&ulp->ul_cv);
10427c478bd9Sstevel@tonic-gate 		goto errout;
104346ac4468Smishra 	}
10447c478bd9Sstevel@tonic-gate 
1045303bf60bSsdebnath 	/*
1046303bf60bSsdebnath 	 * If the fallocate thread requested a write fs lock operation
1047303bf60bSsdebnath 	 * then we set fwlock status in the ulp.
1048303bf60bSsdebnath 	 */
1049303bf60bSsdebnath 	if (info && (info->flags & ULOCK_INFO_FALLOCATE)) {
1050303bf60bSsdebnath 		if (ULOCKFS_IS_WLOCK(ulp))
1051303bf60bSsdebnath 			ULOCKFS_SET_FWLOCK(ulp);
1052303bf60bSsdebnath 	}
1053303bf60bSsdebnath 
10547c478bd9Sstevel@tonic-gate 	/*
10557c478bd9Sstevel@tonic-gate 	 * save error lock status to pass down to reconcilation
10567c478bd9Sstevel@tonic-gate 	 * routines and for later cleanup
10577c478bd9Sstevel@tonic-gate 	 */
10587c478bd9Sstevel@tonic-gate 	if (LOCKFS_IS_ELOCK(&lfs) && ULOCKFS_IS_ULOCK(ulp))
10597c478bd9Sstevel@tonic-gate 		errlck = UN_ERRLCK;
10607c478bd9Sstevel@tonic-gate 
10617c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_ELOCK(ulp) || ULOCKFS_IS_ROELOCK(ulp)) {
10627c478bd9Sstevel@tonic-gate 		int needs_unlock;
10637c478bd9Sstevel@tonic-gate 		int needs_sbwrite;
10647c478bd9Sstevel@tonic-gate 
10657c478bd9Sstevel@tonic-gate 		poll_events |= POLLERR;
106680d34432Sfrankho 		errlck = LOCKFS_IS_ELOCK(&lfs) || LOCKFS_IS_ROELOCK(&lfs) ?
106780d34432Sfrankho 		    RE_ERRLCK : SET_ERRLCK;
10687c478bd9Sstevel@tonic-gate 
10697c478bd9Sstevel@tonic-gate 		needs_unlock = !MUTEX_HELD(&ufsvfsp->vfs_lock);
10707c478bd9Sstevel@tonic-gate 		if (needs_unlock)
10717c478bd9Sstevel@tonic-gate 			mutex_enter(&ufsvfsp->vfs_lock);
10727c478bd9Sstevel@tonic-gate 
10737c478bd9Sstevel@tonic-gate 		/* disable delayed i/o */
10747c478bd9Sstevel@tonic-gate 		needs_sbwrite = 0;
10757c478bd9Sstevel@tonic-gate 
10767c478bd9Sstevel@tonic-gate 		if (errlck == SET_ERRLCK) {
10777c478bd9Sstevel@tonic-gate 			ufsvfsp->vfs_fs->fs_clean = FSBAD;
10787c478bd9Sstevel@tonic-gate 			needs_sbwrite = 1;
10797c478bd9Sstevel@tonic-gate 		}
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate 		needs_sbwrite |= ufsvfsp->vfs_dio;
10827c478bd9Sstevel@tonic-gate 		ufsvfsp->vfs_dio = 0;
10837c478bd9Sstevel@tonic-gate 
10847c478bd9Sstevel@tonic-gate 		if (needs_unlock)
10857c478bd9Sstevel@tonic-gate 			mutex_exit(&ufsvfsp->vfs_lock);
10867c478bd9Sstevel@tonic-gate 
10877c478bd9Sstevel@tonic-gate 		if (needs_sbwrite) {
10887c478bd9Sstevel@tonic-gate 			ulp->ul_sbowner = curthread;
10897c478bd9Sstevel@tonic-gate 			TRANS_SBWRITE(ufsvfsp, TOP_SBWRITE_STABLE);
10907c478bd9Sstevel@tonic-gate 
10917c478bd9Sstevel@tonic-gate 			if (needs_unlock)
10927c478bd9Sstevel@tonic-gate 				mutex_enter(&ufsvfsp->vfs_lock);
10937c478bd9Sstevel@tonic-gate 
10947c478bd9Sstevel@tonic-gate 			ufsvfsp->vfs_fs->fs_fmod = 0;
10957c478bd9Sstevel@tonic-gate 
10967c478bd9Sstevel@tonic-gate 			if (needs_unlock)
10977c478bd9Sstevel@tonic-gate 				mutex_exit(&ufsvfsp->vfs_lock);
10987c478bd9Sstevel@tonic-gate 		}
10997c478bd9Sstevel@tonic-gate 	}
11007c478bd9Sstevel@tonic-gate 
11017c478bd9Sstevel@tonic-gate 	/*
11027c478bd9Sstevel@tonic-gate 	 * reconcile superblock and inodes if was wlocked
11037c478bd9Sstevel@tonic-gate 	 */
11047c478bd9Sstevel@tonic-gate 	if (LOCKFS_IS_WLOCK(&lfs) || LOCKFS_IS_ELOCK(&lfs)) {
11057c478bd9Sstevel@tonic-gate 		if (error = ufs_reconcile(vfsp, ufsvfsp, errlck))
11067c478bd9Sstevel@tonic-gate 			goto errout;
11077c478bd9Sstevel@tonic-gate 		/*
11087c478bd9Sstevel@tonic-gate 		 * in case the fs grew; reset the metadata map for logging tests
11097c478bd9Sstevel@tonic-gate 		 */
11107c478bd9Sstevel@tonic-gate 		TRANS_MATA_UMOUNT(ufsvfsp);
11117c478bd9Sstevel@tonic-gate 		TRANS_MATA_MOUNT(ufsvfsp);
11127c478bd9Sstevel@tonic-gate 		TRANS_MATA_SI(ufsvfsp, ufsvfsp->vfs_fs);
11137c478bd9Sstevel@tonic-gate 	}
11147c478bd9Sstevel@tonic-gate 
11157c478bd9Sstevel@tonic-gate 	/*
11167c478bd9Sstevel@tonic-gate 	 * At least everything *currently* dirty goes out.
11177c478bd9Sstevel@tonic-gate 	 */
11187c478bd9Sstevel@tonic-gate 
11197c478bd9Sstevel@tonic-gate 	if ((error = ufs_flush(vfsp)) != 0 && !ULOCKFS_IS_HLOCK(ulp) &&
11207c478bd9Sstevel@tonic-gate 	    !ULOCKFS_IS_ELOCK(ulp))
11217c478bd9Sstevel@tonic-gate 		goto errout;
11227c478bd9Sstevel@tonic-gate 
11237c478bd9Sstevel@tonic-gate 	/*
11247c478bd9Sstevel@tonic-gate 	 * thaw file system and wakeup pended processes
11257c478bd9Sstevel@tonic-gate 	 */
11267c478bd9Sstevel@tonic-gate 	if (error = ufs_thaw(vfsp, ufsvfsp, ulp))
11277c478bd9Sstevel@tonic-gate 		if (!ULOCKFS_IS_HLOCK(ulp) && !ULOCKFS_IS_ELOCK(ulp))
11287c478bd9Sstevel@tonic-gate 			goto errout;
11297c478bd9Sstevel@tonic-gate 
11307c478bd9Sstevel@tonic-gate 	/*
11317c478bd9Sstevel@tonic-gate 	 * reset modified flag if not already write locked
11327c478bd9Sstevel@tonic-gate 	 */
11337c478bd9Sstevel@tonic-gate 	if (!LOCKFS_IS_WLOCK(&lfs))
11347c478bd9Sstevel@tonic-gate 		ULOCKFS_CLR_MOD(ulp);
11357c478bd9Sstevel@tonic-gate 
11367c478bd9Sstevel@tonic-gate 	/*
11377c478bd9Sstevel@tonic-gate 	 * idle the lock struct
11387c478bd9Sstevel@tonic-gate 	 */
11397c478bd9Sstevel@tonic-gate 	ULOCKFS_CLR_BUSY(ulp);
11407c478bd9Sstevel@tonic-gate 	/* update the ioctl copy */
11417c478bd9Sstevel@tonic-gate 	LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
11427c478bd9Sstevel@tonic-gate 
11437c478bd9Sstevel@tonic-gate 	/*
11447c478bd9Sstevel@tonic-gate 	 * free current comment
11457c478bd9Sstevel@tonic-gate 	 */
11467c478bd9Sstevel@tonic-gate 	if (lfs.lf_comment && lfs.lf_comlen != 0) {
11477c478bd9Sstevel@tonic-gate 		kmem_free(lfs.lf_comment, lfs.lf_comlen);
11487c478bd9Sstevel@tonic-gate 		lfs.lf_comment = NULL;
11497c478bd9Sstevel@tonic-gate 		lfs.lf_comlen = 0;
11507c478bd9Sstevel@tonic-gate 	}
11517c478bd9Sstevel@tonic-gate 
11527c478bd9Sstevel@tonic-gate 	/* do error lock cleanup */
11537c478bd9Sstevel@tonic-gate 	if (errlck == UN_ERRLCK)
11547c478bd9Sstevel@tonic-gate 		ufsfx_unlockfs(ufsvfsp);
11557c478bd9Sstevel@tonic-gate 
11567c478bd9Sstevel@tonic-gate 	else if (errlck == RE_ERRLCK)
11577c478bd9Sstevel@tonic-gate 		ufsfx_lockfs(ufsvfsp);
11587c478bd9Sstevel@tonic-gate 
11597c478bd9Sstevel@tonic-gate 	/* don't allow error lock from user to invoke panic */
11607c478bd9Sstevel@tonic-gate 	else if (from_user && errlck == SET_ERRLCK &&
116180d34432Sfrankho 	    !(ufsvfsp->vfs_fsfx.fx_flags & (UFSMNT_ONERROR_PANIC >> 4)))
11627c478bd9Sstevel@tonic-gate 		(void) ufs_fault(ufsvfsp->vfs_root,
11637c478bd9Sstevel@tonic-gate 		    ulp->ul_lockfs.lf_comment && ulp->ul_lockfs.lf_comlen > 0 ?
11647c478bd9Sstevel@tonic-gate 		    ulp->ul_lockfs.lf_comment: "user-applied error lock");
11657c478bd9Sstevel@tonic-gate 
1166*1a5e258fSJosef 'Jeff' Sipek 	atomic_dec_ulong(&ufs_quiesce_pend);
11677c478bd9Sstevel@tonic-gate 	mutex_exit(&ulp->ul_lock);
11687c478bd9Sstevel@tonic-gate 	vfs_unlock(vfsp);
11697c478bd9Sstevel@tonic-gate 
11707c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs))
11717c478bd9Sstevel@tonic-gate 		poll_events |= POLLERR;
11727c478bd9Sstevel@tonic-gate 
11737c478bd9Sstevel@tonic-gate 	pollwakeup(&ufs_pollhd, poll_events);
11747c478bd9Sstevel@tonic-gate 
11757c478bd9Sstevel@tonic-gate 	/*
11767c478bd9Sstevel@tonic-gate 	 * Allow both the delete thread and the reclaim thread to
11777c478bd9Sstevel@tonic-gate 	 * continue.
11787c478bd9Sstevel@tonic-gate 	 */
11797c478bd9Sstevel@tonic-gate 	ufs_thread_continue(&ufsvfsp->vfs_delete);
11807c478bd9Sstevel@tonic-gate 	ufs_thread_continue(&ufsvfsp->vfs_reclaim);
11817c478bd9Sstevel@tonic-gate 
11827c478bd9Sstevel@tonic-gate 	return (0);
11837c478bd9Sstevel@tonic-gate 
11847c478bd9Sstevel@tonic-gate errout:
11857c478bd9Sstevel@tonic-gate 	/*
11867c478bd9Sstevel@tonic-gate 	 * Lock failed. Reset the old lock in ufsvfs if not hard locked.
11877c478bd9Sstevel@tonic-gate 	 */
11887c478bd9Sstevel@tonic-gate 	if (!LOCKFS_IS_HLOCK(&ulp->ul_lockfs)) {
11897c478bd9Sstevel@tonic-gate 		bcopy(&lfs, &ulp->ul_lockfs, sizeof (struct lockfs));
11907c478bd9Sstevel@tonic-gate 		ulp->ul_fs_lock = (1 << lfs.lf_lock);
11917c478bd9Sstevel@tonic-gate 	}
119246ac4468Smishra 
119346ac4468Smishra 	/*
119446ac4468Smishra 	 * Don't call ufs_thaw() when there's a signal during
119546ac4468Smishra 	 * ufs quiesce operation as it can lead to deadlock
119646ac4468Smishra 	 * with getpage.
119746ac4468Smishra 	 */
119846ac4468Smishra 	if (signal == 0)
119946ac4468Smishra 		(void) ufs_thaw(vfsp, ufsvfsp, ulp);
120046ac4468Smishra 
12017c478bd9Sstevel@tonic-gate 	ULOCKFS_CLR_BUSY(ulp);
12027c478bd9Sstevel@tonic-gate 	LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
12037c478bd9Sstevel@tonic-gate 
12047c478bd9Sstevel@tonic-gate errexit:
1205*1a5e258fSJosef 'Jeff' Sipek 	atomic_dec_ulong(&ufs_quiesce_pend);
12067c478bd9Sstevel@tonic-gate 	mutex_exit(&ulp->ul_lock);
12077c478bd9Sstevel@tonic-gate 	vfs_unlock(vfsp);
12087c478bd9Sstevel@tonic-gate 
12097c478bd9Sstevel@tonic-gate 	/*
12107c478bd9Sstevel@tonic-gate 	 * Allow both the delete thread and the reclaim thread to
12117c478bd9Sstevel@tonic-gate 	 * continue.
12127c478bd9Sstevel@tonic-gate 	 */
12137c478bd9Sstevel@tonic-gate 	ufs_thread_continue(&ufsvfsp->vfs_delete);
12147c478bd9Sstevel@tonic-gate 	ufs_thread_continue(&ufsvfsp->vfs_reclaim);
12157c478bd9Sstevel@tonic-gate 
12167c478bd9Sstevel@tonic-gate 	return (error);
12177c478bd9Sstevel@tonic-gate }
12187c478bd9Sstevel@tonic-gate 
12197c478bd9Sstevel@tonic-gate /*
12207c478bd9Sstevel@tonic-gate  * fiolfss
12217c478bd9Sstevel@tonic-gate  * 	return the current file system locking state info
12227c478bd9Sstevel@tonic-gate  */
12237c478bd9Sstevel@tonic-gate int
ufs_fiolfss(struct vnode * vp,struct lockfs * lockfsp)12247c478bd9Sstevel@tonic-gate ufs_fiolfss(struct vnode *vp, struct lockfs *lockfsp)
12257c478bd9Sstevel@tonic-gate {
12267c478bd9Sstevel@tonic-gate 	struct ulockfs	*ulp;
12277c478bd9Sstevel@tonic-gate 
12287c478bd9Sstevel@tonic-gate 	if (!vp || !vp->v_vfsp || !VTOI(vp))
12297c478bd9Sstevel@tonic-gate 		return (EINVAL);
12307c478bd9Sstevel@tonic-gate 
12317c478bd9Sstevel@tonic-gate 	/* file system has been forcibly unmounted */
12327c478bd9Sstevel@tonic-gate 	if (VTOI(vp)->i_ufsvfs == NULL)
12337c478bd9Sstevel@tonic-gate 		return (EIO);
12347c478bd9Sstevel@tonic-gate 
12357c478bd9Sstevel@tonic-gate 	ulp = VTOUL(vp);
12367c478bd9Sstevel@tonic-gate 
12377c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_HLOCK(ulp)) {
12387c478bd9Sstevel@tonic-gate 		*lockfsp = ulp->ul_lockfs;	/* structure assignment */
12397c478bd9Sstevel@tonic-gate 		return (0);
12407c478bd9Sstevel@tonic-gate 	}
12417c478bd9Sstevel@tonic-gate 
12427c478bd9Sstevel@tonic-gate 	mutex_enter(&ulp->ul_lock);
12437c478bd9Sstevel@tonic-gate 
12447c478bd9Sstevel@tonic-gate 	*lockfsp = ulp->ul_lockfs;	/* structure assignment */
12457c478bd9Sstevel@tonic-gate 
12467c478bd9Sstevel@tonic-gate 	if (ULOCKFS_IS_MOD(ulp))
12477c478bd9Sstevel@tonic-gate 		lockfsp->lf_flags |= LOCKFS_MOD;
12487c478bd9Sstevel@tonic-gate 
12497c478bd9Sstevel@tonic-gate 	mutex_exit(&ulp->ul_lock);
12507c478bd9Sstevel@tonic-gate 
12517c478bd9Sstevel@tonic-gate 	return (0);
12527c478bd9Sstevel@tonic-gate }
12537c478bd9Sstevel@tonic-gate 
/*
 * ufs_check_lockfs
 *	check whether a ufs_vnops conflicts with the file system lock
 *
 *	Called and returns with ulp->ul_lock held.  Sleeps until the
 *	lockfs state in ulp->ul_fs_lock no longer conflicts with 'mask',
 *	then registers the caller by incrementing the appropriate
 *	operation counter (ul_falloc_cnt for fallocate-class operations,
 *	ul_vnops_cnt otherwise).
 *
 *	Returns:
 *	  0      - operation may proceed (counter incremented)
 *	  EAGAIN - caller must not block (T_DONTPEND) and fs is locked
 *	  EIO    - fs is hard locked, or error locked on its way out
 *	  EINTR  - wait was interrupted by a signal
 */
int
ufs_check_lockfs(struct ufsvfs *ufsvfsp, struct ulockfs *ulp, ulong_t mask)
{
	k_sigset_t	smask;
	int		sig, slock;

	ASSERT(MUTEX_HELD(&ulp->ul_lock));

	while (ulp->ul_fs_lock & mask) {
		slock = (int)ULOCKFS_IS_SLOCK(ulp);
		/*
		 * A non-blocking caller (T_DONTPEND) gets EAGAIN instead
		 * of sleeping, except when the fs is merely soft locked
		 * (SLOCK is transient, so waiting it out is acceptable).
		 */
		if ((curthread->t_flag & T_DONTPEND) && !slock) {
			curthread->t_flag |= T_WOULDBLOCK;
			return (EAGAIN);
		}
		curthread->t_flag &= ~T_WOULDBLOCK;

		/*
		 * In the case of an onerr umount of the fs, threads could
		 * have blocked before coming into ufs_check_lockfs and
		 * need to check for the special case of ELOCK and
		 * vfs_dontblock being set which would indicate that the fs
		 * is on its way out and will not return therefore making
		 * EIO the appropriate response.
		 */
		if (ULOCKFS_IS_HLOCK(ulp) ||
		    (ULOCKFS_IS_ELOCK(ulp) && ufsvfsp->vfs_dontblock))
			return (EIO);

		/*
		 * wait for lock status to change
		 */
		if (slock || ufsvfsp->vfs_nointr) {
			/* uninterruptible wait: soft lock or nointr mount */
			cv_wait(&ulp->ul_cv, &ulp->ul_lock);
		} else {
			sigintr(&smask, 1);
			sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
			sigunintr(&smask);
			/*
			 * cv_wait_sig() returns 0 when interrupted by a
			 * signal.  Bail with EINTR if a signal arrived
			 * while the lock still conflicts, or if the fs is
			 * being yanked out from under us (vfs_dontblock).
			 */
			if ((!sig && (ulp->ul_fs_lock & mask)) ||
			    ufsvfsp->vfs_dontblock)
				return (EINTR);
		}
	}

	if (mask & ULOCKFS_FWLOCK) {
		/* fallocate-class operation: tracked separately */
		atomic_inc_ulong(&ulp->ul_falloc_cnt);
		ULOCKFS_SET_FALLOC(ulp);
	} else {
		atomic_inc_ulong(&ulp->ul_vnops_cnt);
	}

	return (0);
}
13107c478bd9Sstevel@tonic-gate 
13117c478bd9Sstevel@tonic-gate /*
13127c478bd9Sstevel@tonic-gate  * Check whether we came across the handcrafted lockfs protocol path. We can't
13137c478bd9Sstevel@tonic-gate  * simply check for T_DONTBLOCK here as one would assume since this can also
13147c478bd9Sstevel@tonic-gate  * falsely catch recursive VOP's going to a different filesystem, instead we
13157c478bd9Sstevel@tonic-gate  * check if we already hold the ulockfs->ul_lock mutex.
13167c478bd9Sstevel@tonic-gate  */
13177c478bd9Sstevel@tonic-gate static int
ufs_lockfs_is_under_rawlockfs(struct ulockfs * ulp)13187c478bd9Sstevel@tonic-gate ufs_lockfs_is_under_rawlockfs(struct ulockfs *ulp)
13197c478bd9Sstevel@tonic-gate {
13207c478bd9Sstevel@tonic-gate 	return ((mutex_owner(&ulp->ul_lock) != curthread) ? 0 : 1);
13217c478bd9Sstevel@tonic-gate }
13227c478bd9Sstevel@tonic-gate 
/*
 * ufs_lockfs_begin - start the lockfs locking protocol
 *
 *	Per-VOP entry point of the lockfs protocol.  On success, *ulpp
 *	points to the per-fs ulockfs structure, the appropriate operation
 *	counter (ul_vnops_cnt or ul_falloc_cnt, selected by 'mask') has
 *	been incremented, a ulockfs record is linked onto this thread's
 *	TSD list and T_DONTBLOCK is set on curthread.  For a recursive
 *	VOP (or the handcrafted raw-lockfs path) *ulpp is set to NULL and
 *	0 is returned without taking any counts.
 *
 *	Returns 0, EIO (forced unmount), ENOMEM (no TSD record), or an
 *	error from ufs_check_lockfs().
 */
int
ufs_lockfs_begin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
{
	int 		error;
	int		rec_vop;
	ushort_t	op_cnt_incremented = 0;
	ulong_t		*ctr;
	struct ulockfs *ulp;
	ulockfs_info_t	*ulockfs_info;
	ulockfs_info_t	*ulockfs_info_free;
	ulockfs_info_t	*ulockfs_info_temp;

	/*
	 * file system has been forcibly unmounted
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	*ulpp = ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * Do lockfs protocol
	 */
	ulockfs_info = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
	IS_REC_VOP(rec_vop, ulockfs_info, ulp, ulockfs_info_free);

	/*
	 * Detect recursive VOP call or handcrafted internal lockfs protocol
	 * path and bail out in that case.
	 */
	if (rec_vop || ufs_lockfs_is_under_rawlockfs(ulp)) {
		*ulpp = NULL;
		return (0);
	} else {
		if (ulockfs_info_free == NULL) {
			/* no reusable record on the TSD list; allocate one */
			if ((ulockfs_info_temp = (ulockfs_info_t *)
			    kmem_zalloc(sizeof (ulockfs_info_t),
			    KM_NOSLEEP)) == NULL) {
				*ulpp = NULL;
				return (ENOMEM);
			}
		}
	}

	/*
	 * First time VOP call
	 *
	 * Increment the ctr irrespective of the lockfs state. If the lockfs
	 * state is not ULOCKFS_ULOCK, we can decrement it later. However,
	 * before incrementing we need to check if there is a pending quiesce
	 * request because if we have a continuous stream of ufs_lockfs_begin
	 * requests pounding on a few cpu's then the ufs_quiesce thread might
	 * never see the value of zero for ctr - a livelock kind of scenario.
	 */
	ctr = (mask & ULOCKFS_FWLOCK) ?
	    &ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
	if (!ULOCKFS_IS_SLOCK(ulp)) {
		atomic_inc_ulong(ctr);
		op_cnt_incremented++;
	}

	/*
	 * If the lockfs state (indicated by ul_fs_lock) is not just
	 * ULOCKFS_ULOCK, then we will be routed through ufs_check_lockfs
	 * where there is a check with an appropriate mask to selectively allow
	 * operations permitted for that kind of lockfs state.
	 *
	 * Even these selective operations should not be allowed to go through
	 * if a lockfs request is in progress because that could result in inode
	 * modifications during a quiesce and could hence result in inode
	 * reconciliation failures. ULOCKFS_SLOCK alone would not be sufficient,
	 * so make use of ufs_quiesce_pend to disallow vnode operations when a
	 * quiesce is in progress.
	 */
	if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
		/*
		 * Slow path: undo the optimistic increment (waking any
		 * waiter when the count hits zero) and go through
		 * ufs_check_lockfs() under ul_lock.
		 */
		if (op_cnt_incremented)
			if (!atomic_dec_ulong_nv(ctr))
				cv_broadcast(&ulp->ul_cv);
		mutex_enter(&ulp->ul_lock);
		error = ufs_check_lockfs(ufsvfsp, ulp, mask);
		mutex_exit(&ulp->ul_lock);
		if (error) {
			/* only free the record if we allocated it above */
			if (ulockfs_info_free == NULL)
				kmem_free(ulockfs_info_temp,
				    sizeof (ulockfs_info_t));
			return (error);
		}
	} else {
		/*
		 * This is the common case of file system in a unlocked state.
		 *
		 * If a file system is unlocked, we would expect the ctr to have
		 * been incremented by now. But this will not be true when a
		 * quiesce is winding up - SLOCK was set when we checked before
		 * incrementing the ctr, but by the time we checked for
		 * ULOCKFS_IS_JUSTULOCK, the quiesce thread was gone. It is okay
		 * to take ul_lock and go through the slow path in this uncommon
		 * case.
		 */
		if (op_cnt_incremented == 0) {
			mutex_enter(&ulp->ul_lock);
			error = ufs_check_lockfs(ufsvfsp, ulp, mask);
			if (error) {
				mutex_exit(&ulp->ul_lock);
				/* only free the record if allocated above */
				if (ulockfs_info_free == NULL)
					kmem_free(ulockfs_info_temp,
					    sizeof (ulockfs_info_t));
				return (error);
			}
			if (mask & ULOCKFS_FWLOCK)
				ULOCKFS_SET_FALLOC(ulp);
			mutex_exit(&ulp->ul_lock);
		} else if (mask & ULOCKFS_FWLOCK) {
			/* count already taken; just flag the fallocate */
			mutex_enter(&ulp->ul_lock);
			ULOCKFS_SET_FALLOC(ulp);
			mutex_exit(&ulp->ul_lock);
		}
	}

	/* record this ulp in the thread's TSD list for recursion detection */
	if (ulockfs_info_free != NULL) {
		ulockfs_info_free->ulp = ulp;
		if (mask & ULOCKFS_FWLOCK)
			ulockfs_info_free->flags |= ULOCK_INFO_FALLOCATE;
	} else {
		ulockfs_info_temp->ulp = ulp;
		ulockfs_info_temp->next = ulockfs_info;
		if (mask & ULOCKFS_FWLOCK)
			ulockfs_info_temp->flags |= ULOCK_INFO_FALLOCATE;
		ASSERT(ufs_lockfs_key != 0);
		(void) tsd_set(ufs_lockfs_key, (void *)ulockfs_info_temp);
	}

	curthread->t_flag |= T_DONTBLOCK;
	return (0);
}
14617c478bd9Sstevel@tonic-gate 
14627c478bd9Sstevel@tonic-gate /*
14637c478bd9Sstevel@tonic-gate  * Check whether we are returning from the top level VOP.
14647c478bd9Sstevel@tonic-gate  */
14657c478bd9Sstevel@tonic-gate static int
ufs_lockfs_top_vop_return(ulockfs_info_t * head)14667c478bd9Sstevel@tonic-gate ufs_lockfs_top_vop_return(ulockfs_info_t *head)
14677c478bd9Sstevel@tonic-gate {
14687c478bd9Sstevel@tonic-gate 	ulockfs_info_t *info;
14697c478bd9Sstevel@tonic-gate 	int result = 1;
14707c478bd9Sstevel@tonic-gate 
14717c478bd9Sstevel@tonic-gate 	for (info = head; info != NULL; info = info->next) {
14727c478bd9Sstevel@tonic-gate 		if (info->ulp != NULL) {
14737c478bd9Sstevel@tonic-gate 			result = 0;
14747c478bd9Sstevel@tonic-gate 			break;
14757c478bd9Sstevel@tonic-gate 		}
14767c478bd9Sstevel@tonic-gate 	}
14777c478bd9Sstevel@tonic-gate 
14787c478bd9Sstevel@tonic-gate 	return (result);
14797c478bd9Sstevel@tonic-gate }
14807c478bd9Sstevel@tonic-gate 
/*
 * ufs_lockfs_end - terminate the lockfs locking protocol
 *
 *	Companion to ufs_lockfs_begin()/ufs_lockfs_trybegin(): drops the
 *	operation count taken at begin time, invalidates this thread's
 *	TSD record for 'ulp', and clears T_DONTBLOCK once the top-level
 *	VOP is returning.  Waiters on ul_cv (e.g. the quiesce thread) are
 *	woken when a counter drops to zero.  A NULL 'ulp' (recursive VOP
 *	case from begin) is a no-op.
 */
void
ufs_lockfs_end(struct ulockfs *ulp)
{
	ulockfs_info_t *info;
	ulockfs_info_t *head;

	/*
	 * end-of-VOP protocol
	 */
	if (ulp == NULL)
		return;

	head = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
	SEARCH_ULOCKFSP(head, ulp, info);

	/*
	 * If we're called from a first level VOP, we have to have a
	 * valid ulockfs record in the TSD.
	 */
	ASSERT(info != NULL);

	/*
	 * Invalidate the ulockfs record.
	 */
	info->ulp = NULL;

	if (ufs_lockfs_top_vop_return(head))
		curthread->t_flag &= ~T_DONTBLOCK;

	/* fallocate thread */
	if (ULOCKFS_IS_FALLOC(ulp) && info->flags & ULOCK_INFO_FALLOCATE) {
		/* Clear the thread's fallocate state */
		info->flags &= ~ULOCK_INFO_FALLOCATE;
		if (!atomic_dec_ulong_nv(&ulp->ul_falloc_cnt)) {
			/* last fallocate out: clear FALLOC and wake waiters */
			mutex_enter(&ulp->ul_lock);
			ULOCKFS_CLR_FALLOC(ulp);
			cv_broadcast(&ulp->ul_cv);
			mutex_exit(&ulp->ul_lock);
		}
	} else  { /* normal thread */
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
	}
}
15287c478bd9Sstevel@tonic-gate 
/*
 * ufs_lockfs_trybegin - try to start the lockfs locking protocol without
 * blocking.
 *
 *	Non-blocking twin of ufs_lockfs_begin(): instead of sleeping in
 *	ufs_check_lockfs() when the fs is locked against the operation,
 *	it fails immediately with EAGAIN (or EIO for a hard/error lock).
 *	On success the side effects match ufs_lockfs_begin(): counter
 *	incremented, TSD record installed, T_DONTBLOCK set.
 */
int
ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
{
	int 		error = 0;
	int		rec_vop;
	ushort_t	op_cnt_incremented = 0;
	ulong_t		*ctr;
	struct ulockfs *ulp;
	ulockfs_info_t	*ulockfs_info;
	ulockfs_info_t	*ulockfs_info_free;
	ulockfs_info_t	*ulockfs_info_temp;

	/*
	 * file system has been forcibly unmounted
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	*ulpp = ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * Do lockfs protocol
	 */
	ulockfs_info = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
	IS_REC_VOP(rec_vop, ulockfs_info, ulp, ulockfs_info_free);

	/*
	 * Detect recursive VOP call or handcrafted internal lockfs protocol
	 * path and bail out in that case.
	 */
	if (rec_vop || ufs_lockfs_is_under_rawlockfs(ulp)) {
		*ulpp = NULL;
		return (0);
	} else {
		if (ulockfs_info_free == NULL) {
			/* no reusable record on the TSD list; allocate one */
			if ((ulockfs_info_temp = (ulockfs_info_t *)
			    kmem_zalloc(sizeof (ulockfs_info_t),
			    KM_NOSLEEP)) == NULL) {
				*ulpp = NULL;
				return (ENOMEM);
			}
		}
	}

	/*
	 * First time VOP call
	 *
	 * Increment the ctr irrespective of the lockfs state. If the lockfs
	 * state is not ULOCKFS_ULOCK, we can decrement it later. However,
	 * before incrementing we need to check if there is a pending quiesce
	 * request because if we have a continuous stream of ufs_lockfs_begin
	 * requests pounding on a few cpu's then the ufs_quiesce thread might
	 * never see the value of zero for ctr - a livelock kind of scenario.
	 */
	ctr = (mask & ULOCKFS_FWLOCK) ?
	    &ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
	if (!ULOCKFS_IS_SLOCK(ulp)) {
		atomic_inc_ulong(ctr);
		op_cnt_incremented++;
	}

	if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
		/*
		 * Non-blocking version of ufs_check_lockfs() code.
		 *
		 * If the file system is not hard locked or error locked
		 * and if ulp->ul_fs_lock allows this operation, increment
		 * the appropriate counter and proceed (For eg., In case the
		 * file system is delete locked, a mmap can still go through).
		 */
		if (op_cnt_incremented)
			if (!atomic_dec_ulong_nv(ctr))
				cv_broadcast(&ulp->ul_cv);
		mutex_enter(&ulp->ul_lock);
		if (ULOCKFS_IS_HLOCK(ulp) ||
		    (ULOCKFS_IS_ELOCK(ulp) && ufsvfsp->vfs_dontblock))
			error = EIO;
		else if (ulp->ul_fs_lock & mask)
			error = EAGAIN;

		if (error) {
			mutex_exit(&ulp->ul_lock);
			/* only free the record if we allocated it above */
			if (ulockfs_info_free == NULL)
				kmem_free(ulockfs_info_temp,
				    sizeof (ulockfs_info_t));
			return (error);
		}
		/* re-take the count dropped above, now under ul_lock */
		atomic_inc_ulong(ctr);
		if (mask & ULOCKFS_FWLOCK)
			ULOCKFS_SET_FALLOC(ulp);
		mutex_exit(&ulp->ul_lock);
	} else {
		/*
		 * This is the common case of file system in a unlocked state.
		 *
		 * If a file system is unlocked, we would expect the ctr to have
		 * been incremented by now. But this will not be true when a
		 * quiesce is winding up - SLOCK was set when we checked before
		 * incrementing the ctr, but by the time we checked for
		 * ULOCKFS_IS_JUSTULOCK, the quiesce thread was gone. Take
		 * ul_lock and go through the non-blocking version of
		 * ufs_check_lockfs() code.
		 */
		if (op_cnt_incremented == 0) {
			mutex_enter(&ulp->ul_lock);
			if (ULOCKFS_IS_HLOCK(ulp) ||
			    (ULOCKFS_IS_ELOCK(ulp) && ufsvfsp->vfs_dontblock))
				error = EIO;
			else if (ulp->ul_fs_lock & mask)
				error = EAGAIN;

			if (error) {
				mutex_exit(&ulp->ul_lock);
				/* only free the record if allocated above */
				if (ulockfs_info_free == NULL)
					kmem_free(ulockfs_info_temp,
					    sizeof (ulockfs_info_t));
				return (error);
			}
			atomic_inc_ulong(ctr);
			if (mask & ULOCKFS_FWLOCK)
				ULOCKFS_SET_FALLOC(ulp);
			mutex_exit(&ulp->ul_lock);
		} else if (mask & ULOCKFS_FWLOCK) {
			/* count already taken; just flag the fallocate */
			mutex_enter(&ulp->ul_lock);
			ULOCKFS_SET_FALLOC(ulp);
			mutex_exit(&ulp->ul_lock);
		}
	}

	/* record this ulp in the thread's TSD list for recursion detection */
	if (ulockfs_info_free != NULL) {
		ulockfs_info_free->ulp = ulp;
		if (mask & ULOCKFS_FWLOCK)
			ulockfs_info_free->flags |= ULOCK_INFO_FALLOCATE;
	} else {
		ulockfs_info_temp->ulp = ulp;
		ulockfs_info_temp->next = ulockfs_info;
		if (mask & ULOCKFS_FWLOCK)
			ulockfs_info_temp->flags |= ULOCK_INFO_FALLOCATE;
		ASSERT(ufs_lockfs_key != 0);
		(void) tsd_set(ufs_lockfs_key, (void *)ulockfs_info_temp);
	}

	curthread->t_flag |= T_DONTBLOCK;
	return (0);
}
16786ac3b8a8Svsakar 
/*
 * specialized version of ufs_lockfs_begin() called by ufs_getpage().
 *
 *	Differs from ufs_lockfs_begin() in that the lock mask is derived
 *	here from the fault context: private segvn mappings and read
 *	faults use ULOCKFS_GETREAD_MASK (and for read faults *protp is
 *	narrowed to read-only so a later write refaults), while anything
 *	else uses ULOCKFS_GETWRITE_MASK.  Always accounts against
 *	ul_vnops_cnt (never the fallocate counter).
 */
int
ufs_lockfs_begin_getpage(
	struct ufsvfs	*ufsvfsp,
	struct ulockfs	**ulpp,
	struct seg	*seg,
	int		read_access,
	uint_t		*protp)
{
	ulong_t			mask;
	int 			error;
	int			rec_vop;
	struct ulockfs		*ulp;
	ulockfs_info_t		*ulockfs_info;
	ulockfs_info_t		*ulockfs_info_free;
	ulockfs_info_t		*ulockfs_info_temp;

	/*
	 * file system has been forcibly unmounted
	 */
	if (ufsvfsp == NULL)
		return (EIO);

	*ulpp = ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * Do lockfs protocol
	 */
	ulockfs_info = (ulockfs_info_t *)tsd_get(ufs_lockfs_key);
	IS_REC_VOP(rec_vop, ulockfs_info, ulp, ulockfs_info_free);

	/*
	 * Detect recursive VOP call or handcrafted internal lockfs protocol
	 * path and bail out in that case.
	 */
	if (rec_vop || ufs_lockfs_is_under_rawlockfs(ulp)) {
		*ulpp = NULL;
		return (0);
	} else {
		if (ulockfs_info_free == NULL) {
			/* no reusable record on the TSD list; allocate one */
			if ((ulockfs_info_temp = (ulockfs_info_t *)
			    kmem_zalloc(sizeof (ulockfs_info_t),
			    KM_NOSLEEP)) == NULL) {
				*ulpp = NULL;
				return (ENOMEM);
			}
		}
	}

	/*
	 * First time VOP call
	 */
	atomic_inc_ulong(&ulp->ul_vnops_cnt);
	if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
		/*
		 * Slow path: drop the optimistic count (waking waiters at
		 * zero) and re-check under ul_lock via ufs_check_lockfs().
		 */
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		mutex_enter(&ulp->ul_lock);
		if (seg->s_ops == &segvn_ops &&
		    ((struct segvn_data *)seg->s_data)->type != MAP_SHARED) {
			/* private mapping: treat as a read-class operation */
			mask = (ulong_t)ULOCKFS_GETREAD_MASK;
		} else if (protp && read_access) {
			/*
			 * Restrict the mapping to readonly.
			 * Writes to this mapping will cause
			 * another fault which will then
			 * be suspended if fs is write locked
			 */
			*protp &= ~PROT_WRITE;
			mask = (ulong_t)ULOCKFS_GETREAD_MASK;
		} else
			mask = (ulong_t)ULOCKFS_GETWRITE_MASK;

		/*
		 * will sleep if this fs is locked against this VOP
		 */
		error = ufs_check_lockfs(ufsvfsp, ulp, mask);
		mutex_exit(&ulp->ul_lock);
		if (error) {
			/* only free the record if we allocated it above */
			if (ulockfs_info_free == NULL)
				kmem_free(ulockfs_info_temp,
				    sizeof (ulockfs_info_t));
			return (error);
		}
	}

	/* record this ulp in the thread's TSD list for recursion detection */
	if (ulockfs_info_free != NULL) {
		ulockfs_info_free->ulp = ulp;
	} else {
		ulockfs_info_temp->ulp = ulp;
		ulockfs_info_temp->next = ulockfs_info;
		ASSERT(ufs_lockfs_key != 0);
		(void) tsd_set(ufs_lockfs_key, (void *)ulockfs_info_temp);
	}

	curthread->t_flag |= T_DONTBLOCK;
	return (0);
}
17787c478bd9Sstevel@tonic-gate 
17797c478bd9Sstevel@tonic-gate void
ufs_lockfs_tsd_destructor(void * head)17807c478bd9Sstevel@tonic-gate ufs_lockfs_tsd_destructor(void *head)
17817c478bd9Sstevel@tonic-gate {
17827c478bd9Sstevel@tonic-gate 	ulockfs_info_t *curr = (ulockfs_info_t *)head;
17837c478bd9Sstevel@tonic-gate 	ulockfs_info_t *temp;
17847c478bd9Sstevel@tonic-gate 
17857c478bd9Sstevel@tonic-gate 	for (; curr != NULL; ) {
17867c478bd9Sstevel@tonic-gate 		/*
17877c478bd9Sstevel@tonic-gate 		 * The TSD destructor is being called when the thread exits
17887c478bd9Sstevel@tonic-gate 		 * (via thread_exit()). At that time it must have cleaned up
17897c478bd9Sstevel@tonic-gate 		 * all VOPs via ufs_lockfs_end() and there must not be a
17907c478bd9Sstevel@tonic-gate 		 * valid ulockfs record exist while a thread is exiting.
17917c478bd9Sstevel@tonic-gate 		 */
17927c478bd9Sstevel@tonic-gate 		temp = curr;
17937c478bd9Sstevel@tonic-gate 		curr = curr->next;
17947c478bd9Sstevel@tonic-gate 		ASSERT(temp->ulp == NULL);
17957c478bd9Sstevel@tonic-gate 		kmem_free(temp, sizeof (ulockfs_info_t));
17967c478bd9Sstevel@tonic-gate 	}
17977c478bd9Sstevel@tonic-gate }
1798