xref: /illumos-gate/usr/src/uts/common/fs/hsfs/hsfs_vnops.c (revision aa59c4cb15a6ac5d4e585dadf7a055b580abf579)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
59cbc422eSpeterte  * Common Development and Distribution License (the "License").
69cbc422eSpeterte  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22*aa59c4cbSrsb  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate /*
297c478bd9Sstevel@tonic-gate  * Vnode operations for the High Sierra filesystem
307c478bd9Sstevel@tonic-gate  */
317c478bd9Sstevel@tonic-gate 
327c478bd9Sstevel@tonic-gate #include <sys/types.h>
337c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
347c478bd9Sstevel@tonic-gate #include <sys/param.h>
357c478bd9Sstevel@tonic-gate #include <sys/time.h>
367c478bd9Sstevel@tonic-gate #include <sys/systm.h>
377c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h>
387c478bd9Sstevel@tonic-gate #include <sys/resource.h>
397c478bd9Sstevel@tonic-gate #include <sys/signal.h>
407c478bd9Sstevel@tonic-gate #include <sys/cred.h>
417c478bd9Sstevel@tonic-gate #include <sys/user.h>
427c478bd9Sstevel@tonic-gate #include <sys/buf.h>
437c478bd9Sstevel@tonic-gate #include <sys/vfs.h>
44*aa59c4cbSrsb #include <sys/vfs_opreg.h>
457c478bd9Sstevel@tonic-gate #include <sys/stat.h>
467c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
477c478bd9Sstevel@tonic-gate #include <sys/mode.h>
487c478bd9Sstevel@tonic-gate #include <sys/proc.h>
497c478bd9Sstevel@tonic-gate #include <sys/disp.h>
507c478bd9Sstevel@tonic-gate #include <sys/file.h>
517c478bd9Sstevel@tonic-gate #include <sys/fcntl.h>
527c478bd9Sstevel@tonic-gate #include <sys/flock.h>
537c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
547c478bd9Sstevel@tonic-gate #include <sys/uio.h>
557c478bd9Sstevel@tonic-gate #include <sys/conf.h>
567c478bd9Sstevel@tonic-gate #include <sys/errno.h>
577c478bd9Sstevel@tonic-gate #include <sys/mman.h>
587c478bd9Sstevel@tonic-gate #include <sys/pathname.h>
597c478bd9Sstevel@tonic-gate #include <sys/debug.h>
607c478bd9Sstevel@tonic-gate #include <sys/vmsystm.h>
617c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
627c478bd9Sstevel@tonic-gate #include <sys/fbuf.h>
637c478bd9Sstevel@tonic-gate #include <sys/dirent.h>
647c478bd9Sstevel@tonic-gate #include <sys/errno.h>
657c478bd9Sstevel@tonic-gate 
667c478bd9Sstevel@tonic-gate #include <vm/hat.h>
677c478bd9Sstevel@tonic-gate #include <vm/page.h>
687c478bd9Sstevel@tonic-gate #include <vm/pvn.h>
697c478bd9Sstevel@tonic-gate #include <vm/as.h>
707c478bd9Sstevel@tonic-gate #include <vm/seg.h>
717c478bd9Sstevel@tonic-gate #include <vm/seg_map.h>
727c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
737c478bd9Sstevel@tonic-gate #include <vm/seg_vn.h>
747c478bd9Sstevel@tonic-gate #include <vm/rm.h>
757c478bd9Sstevel@tonic-gate #include <vm/page.h>
767c478bd9Sstevel@tonic-gate #include <sys/swap.h>
777c478bd9Sstevel@tonic-gate 
787c478bd9Sstevel@tonic-gate #include <sys/fs/hsfs_spec.h>
797c478bd9Sstevel@tonic-gate #include <sys/fs/hsfs_node.h>
807c478bd9Sstevel@tonic-gate #include <sys/fs/hsfs_impl.h>
817c478bd9Sstevel@tonic-gate #include <sys/fs/hsfs_susp.h>
827c478bd9Sstevel@tonic-gate #include <sys/fs/hsfs_rrip.h>
837c478bd9Sstevel@tonic-gate 
847c478bd9Sstevel@tonic-gate #include <fs/fs_subr.h>
857c478bd9Sstevel@tonic-gate 
867c478bd9Sstevel@tonic-gate /* ARGSUSED */
877c478bd9Sstevel@tonic-gate static int
887c478bd9Sstevel@tonic-gate hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
897c478bd9Sstevel@tonic-gate {
907c478bd9Sstevel@tonic-gate 	return (0);
917c478bd9Sstevel@tonic-gate }
927c478bd9Sstevel@tonic-gate 
937c478bd9Sstevel@tonic-gate 
947c478bd9Sstevel@tonic-gate /*ARGSUSED*/
957c478bd9Sstevel@tonic-gate static int
967c478bd9Sstevel@tonic-gate hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
977c478bd9Sstevel@tonic-gate 	struct caller_context *ct)
987c478bd9Sstevel@tonic-gate {
997c478bd9Sstevel@tonic-gate 	caddr_t base;
1008cd7c4fcSpeterte 	offset_t diff;
1017c478bd9Sstevel@tonic-gate 	int error;
1028cd7c4fcSpeterte 	struct hsnode *hp;
1038cd7c4fcSpeterte 	uint_t filesize;
1047c478bd9Sstevel@tonic-gate 
1057c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
1067c478bd9Sstevel@tonic-gate 	/*
1077c478bd9Sstevel@tonic-gate 	 * If vp is of type VDIR, make sure the dirent
1087c478bd9Sstevel@tonic-gate 	 * is filled in with all info (because of the path table).
1097c478bd9Sstevel@tonic-gate 	 */
1107c478bd9Sstevel@tonic-gate 	if (vp->v_type == VDIR) {
1117c478bd9Sstevel@tonic-gate 		if (hp->hs_dirent.ext_size == 0)
1127c478bd9Sstevel@tonic-gate 			hs_filldirent(vp, &hp->hs_dirent);
1137c478bd9Sstevel@tonic-gate 	}
1147c478bd9Sstevel@tonic-gate 	filesize = hp->hs_dirent.ext_size;
1157c478bd9Sstevel@tonic-gate 
1168cd7c4fcSpeterte 	/* Sanity checks. */
1178cd7c4fcSpeterte 	if (uiop->uio_resid == 0 ||		/* No data wanted. */
1189cbc422eSpeterte 	    uiop->uio_loffset > HS_MAXFILEOFF ||	/* Offset too big. */
1198cd7c4fcSpeterte 	    uiop->uio_loffset >= filesize)	/* Past EOF. */
1208cd7c4fcSpeterte 		return (0);
1217c478bd9Sstevel@tonic-gate 
1227c478bd9Sstevel@tonic-gate 	do {
1238cd7c4fcSpeterte 		/*
1248cd7c4fcSpeterte 		 * We want to ask for only the "right" amount of data.
1258cd7c4fcSpeterte 		 * In this case that means:
1268cd7c4fcSpeterte 		 *
1278cd7c4fcSpeterte 		 * We can't get data from beyond our EOF. If asked,
1288cd7c4fcSpeterte 		 * we will give a short read.
1298cd7c4fcSpeterte 		 *
1308cd7c4fcSpeterte 		 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
1318cd7c4fcSpeterte 		 * These buffers are always MAXBSIZE aligned.
1328cd7c4fcSpeterte 		 * If our starting offset is not MAXBSIZE aligned,
1338cd7c4fcSpeterte 		 * we can only ask for less than MAXBSIZE bytes.
1348cd7c4fcSpeterte 		 *
1358cd7c4fcSpeterte 		 * If our requested offset and length are such that
1368cd7c4fcSpeterte 		 * they belong in different MAXBSIZE aligned slots
1378cd7c4fcSpeterte 		 * then we'll be making more than one call on
1388cd7c4fcSpeterte 		 * segmap_getmapflt.
1398cd7c4fcSpeterte 		 *
1408cd7c4fcSpeterte 		 * This diagram shows the variables we use and their
1418cd7c4fcSpeterte 		 * relationships.
1428cd7c4fcSpeterte 		 *
1438cd7c4fcSpeterte 		 * |<-----MAXBSIZE----->|
1448cd7c4fcSpeterte 		 * +--------------------------...+
1458cd7c4fcSpeterte 		 * |.....mapon->|<--n-->|....*...|EOF
1468cd7c4fcSpeterte 		 * +--------------------------...+
1478cd7c4fcSpeterte 		 * uio_loffset->|
1488cd7c4fcSpeterte 		 * uio_resid....|<---------->|
1498cd7c4fcSpeterte 		 * diff.........|<-------------->|
1508cd7c4fcSpeterte 		 *
1518cd7c4fcSpeterte 		 * So, in this case our offset is not aligned
1528cd7c4fcSpeterte 		 * and our request takes us outside of the
1538cd7c4fcSpeterte 		 * MAXBSIZE window. We will break this up into
1548cd7c4fcSpeterte 		 * two segmap_getmapflt calls.
1558cd7c4fcSpeterte 		 */
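		/*
		 * For example (assuming MAXBSIZE is 8K, an illustrative
		 * value): a 5000-byte read at uio_loffset 6000 from a
		 * 20000-byte file gives mapon = 6000, diff = 14000 and
		 * nbytes = 8192 - 6000 = 2192, so the first pass maps
		 * and copies 2192 bytes up to the 8K boundary; the loop
		 * then comes around for the remaining 2808 bytes from
		 * the next MAXBSIZE window.
		 */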
1568cd7c4fcSpeterte 		size_t nbytes;
1578cd7c4fcSpeterte 		offset_t mapon;
1588cd7c4fcSpeterte 		size_t n;
1598cd7c4fcSpeterte 		uint_t flags;
1608cd7c4fcSpeterte 
1618cd7c4fcSpeterte 		mapon = uiop->uio_loffset & MAXBOFFSET;
1628cd7c4fcSpeterte 		diff = filesize - uiop->uio_loffset;
1638cd7c4fcSpeterte 		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
1648cd7c4fcSpeterte 		n = MIN(diff, nbytes);
1658cd7c4fcSpeterte 		if (n <= 0) {
1668cd7c4fcSpeterte 			/* EOF or request satisfied. */
1678cd7c4fcSpeterte 			return (0);
1687c478bd9Sstevel@tonic-gate 		}
1697c478bd9Sstevel@tonic-gate 
1708cd7c4fcSpeterte 		base = segmap_getmapflt(segkmap, vp,
1718cd7c4fcSpeterte 		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
1728cd7c4fcSpeterte 
1738cd7c4fcSpeterte 		error = uiomove(base + mapon, n, UIO_READ, uiop);
1747c478bd9Sstevel@tonic-gate 
1757c478bd9Sstevel@tonic-gate 		if (error == 0) {
1767c478bd9Sstevel@tonic-gate 			/*
1777c478bd9Sstevel@tonic-gate 			 * If we read a whole block, or read to EOF,
1787c478bd9Sstevel@tonic-gate 			 * we won't need this buffer again soon.
1797c478bd9Sstevel@tonic-gate 			 */
1808cd7c4fcSpeterte 			if (n + mapon == MAXBSIZE ||
1818cd7c4fcSpeterte 			    uiop->uio_loffset == filesize)
1827c478bd9Sstevel@tonic-gate 				flags = SM_DONTNEED;
1837c478bd9Sstevel@tonic-gate 			else
1847c478bd9Sstevel@tonic-gate 				flags = 0;
1857c478bd9Sstevel@tonic-gate 			error = segmap_release(segkmap, base, flags);
1867c478bd9Sstevel@tonic-gate 		} else
1877c478bd9Sstevel@tonic-gate 			(void) segmap_release(segkmap, base, 0);
1887c478bd9Sstevel@tonic-gate 	} while (error == 0 && uiop->uio_resid > 0);
1897c478bd9Sstevel@tonic-gate 
1907c478bd9Sstevel@tonic-gate 	return (error);
1917c478bd9Sstevel@tonic-gate }
1927c478bd9Sstevel@tonic-gate 
1937c478bd9Sstevel@tonic-gate /*ARGSUSED2*/
1947c478bd9Sstevel@tonic-gate static int
1957c478bd9Sstevel@tonic-gate hsfs_getattr(
1967c478bd9Sstevel@tonic-gate 	struct vnode *vp,
1977c478bd9Sstevel@tonic-gate 	struct vattr *vap,
1987c478bd9Sstevel@tonic-gate 	int flags,
1997c478bd9Sstevel@tonic-gate 	struct cred *cred)
2007c478bd9Sstevel@tonic-gate {
2017c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
2027c478bd9Sstevel@tonic-gate 	struct vfs *vfsp;
2037c478bd9Sstevel@tonic-gate 	struct hsfs *fsp;
2047c478bd9Sstevel@tonic-gate 
2057c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
2067c478bd9Sstevel@tonic-gate 	fsp = VFS_TO_HSFS(vp->v_vfsp);
2077c478bd9Sstevel@tonic-gate 	vfsp = vp->v_vfsp;
2087c478bd9Sstevel@tonic-gate 
2097c478bd9Sstevel@tonic-gate 	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
2107c478bd9Sstevel@tonic-gate 		hs_filldirent(vp, &hp->hs_dirent);
2117c478bd9Sstevel@tonic-gate 	}
2127c478bd9Sstevel@tonic-gate 	vap->va_type = IFTOVT(hp->hs_dirent.mode);
2137c478bd9Sstevel@tonic-gate 	vap->va_mode = hp->hs_dirent.mode;
2147c478bd9Sstevel@tonic-gate 	vap->va_uid = hp->hs_dirent.uid;
2157c478bd9Sstevel@tonic-gate 	vap->va_gid = hp->hs_dirent.gid;
2167c478bd9Sstevel@tonic-gate 
2177c478bd9Sstevel@tonic-gate 	vap->va_fsid = vfsp->vfs_dev;
2187c478bd9Sstevel@tonic-gate 	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
2197c478bd9Sstevel@tonic-gate 	vap->va_nlink = hp->hs_dirent.nlink;
2207c478bd9Sstevel@tonic-gate 	vap->va_size =	(offset_t)hp->hs_dirent.ext_size;
2217c478bd9Sstevel@tonic-gate 
2227c478bd9Sstevel@tonic-gate 	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
2237c478bd9Sstevel@tonic-gate 	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
2247c478bd9Sstevel@tonic-gate 	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
2257c478bd9Sstevel@tonic-gate 	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
2267c478bd9Sstevel@tonic-gate 	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
2277c478bd9Sstevel@tonic-gate 	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
2287c478bd9Sstevel@tonic-gate 	if (vp->v_type == VCHR || vp->v_type == VBLK)
2297c478bd9Sstevel@tonic-gate 		vap->va_rdev = hp->hs_dirent.r_dev;
2307c478bd9Sstevel@tonic-gate 	else
2317c478bd9Sstevel@tonic-gate 		vap->va_rdev = 0;
2327c478bd9Sstevel@tonic-gate 	vap->va_blksize = vfsp->vfs_bsize;
2337c478bd9Sstevel@tonic-gate 	/* no. of blocks = no. of data blocks + no. of xar blocks */
2347c478bd9Sstevel@tonic-gate 	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
2357c478bd9Sstevel@tonic-gate 	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
2367c478bd9Sstevel@tonic-gate 	vap->va_seq = hp->hs_seq;
2377c478bd9Sstevel@tonic-gate 	return (0);
2387c478bd9Sstevel@tonic-gate }
2397c478bd9Sstevel@tonic-gate 
2407c478bd9Sstevel@tonic-gate /*ARGSUSED*/
2417c478bd9Sstevel@tonic-gate static int
2427c478bd9Sstevel@tonic-gate hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
2437c478bd9Sstevel@tonic-gate {
2447c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
2457c478bd9Sstevel@tonic-gate 
2467c478bd9Sstevel@tonic-gate 	if (vp->v_type != VLNK)
2477c478bd9Sstevel@tonic-gate 		return (EINVAL);
2487c478bd9Sstevel@tonic-gate 
2497c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
2507c478bd9Sstevel@tonic-gate 
2517c478bd9Sstevel@tonic-gate 	if (hp->hs_dirent.sym_link == (char *)NULL)
2527c478bd9Sstevel@tonic-gate 		return (ENOENT);
2537c478bd9Sstevel@tonic-gate 
2547c478bd9Sstevel@tonic-gate 	return (uiomove(hp->hs_dirent.sym_link,
2557c478bd9Sstevel@tonic-gate 	    (size_t)MIN(hp->hs_dirent.ext_size,
2567c478bd9Sstevel@tonic-gate 	    uiop->uio_resid), UIO_READ, uiop));
2577c478bd9Sstevel@tonic-gate }
2587c478bd9Sstevel@tonic-gate 
2597c478bd9Sstevel@tonic-gate /*ARGSUSED*/
2607c478bd9Sstevel@tonic-gate static void
2617c478bd9Sstevel@tonic-gate hsfs_inactive(struct vnode *vp, struct cred *cred)
2627c478bd9Sstevel@tonic-gate {
2637c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
2647c478bd9Sstevel@tonic-gate 	struct hsfs *fsp;
2657c478bd9Sstevel@tonic-gate 
2667c478bd9Sstevel@tonic-gate 	int nopage;
2677c478bd9Sstevel@tonic-gate 
2687c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
2697c478bd9Sstevel@tonic-gate 	fsp = VFS_TO_HSFS(vp->v_vfsp);
2707c478bd9Sstevel@tonic-gate 	/*
2717c478bd9Sstevel@tonic-gate 	 * Note: acquiring and holding v_lock for quite a while
2727c478bd9Sstevel@tonic-gate 	 * here serializes on the vnode; this is unfortunate, but
2737c478bd9Sstevel@tonic-gate 	 * unlikely to impact performance much, as the underlying
2747c478bd9Sstevel@tonic-gate 	 * device (CD-ROM drive) is quite slow.
2757c478bd9Sstevel@tonic-gate 	 */
2767c478bd9Sstevel@tonic-gate 	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
2777c478bd9Sstevel@tonic-gate 	mutex_enter(&hp->hs_contents_lock);
2787c478bd9Sstevel@tonic-gate 	mutex_enter(&vp->v_lock);
2797c478bd9Sstevel@tonic-gate 
2807c478bd9Sstevel@tonic-gate 	if (vp->v_count < 1) {
2817c478bd9Sstevel@tonic-gate 		panic("hsfs_inactive: v_count < 1");
2827c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
2837c478bd9Sstevel@tonic-gate 	}
2847c478bd9Sstevel@tonic-gate 
2857c478bd9Sstevel@tonic-gate 	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
2867c478bd9Sstevel@tonic-gate 		vp->v_count--;	/* release hold from vn_rele */
2877c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
2887c478bd9Sstevel@tonic-gate 		mutex_exit(&hp->hs_contents_lock);
2897c478bd9Sstevel@tonic-gate 		rw_exit(&fsp->hsfs_hash_lock);
2907c478bd9Sstevel@tonic-gate 		return;
2917c478bd9Sstevel@tonic-gate 	}
2927c478bd9Sstevel@tonic-gate 	vp->v_count--;	/* release hold from vn_rele */
2937c478bd9Sstevel@tonic-gate 	if (vp->v_count == 0) {
2947c478bd9Sstevel@tonic-gate 		/*
2957c478bd9Sstevel@tonic-gate 		 * Free the hsnode.
2967c478bd9Sstevel@tonic-gate 		 * If there are no pages associated with the
2977c478bd9Sstevel@tonic-gate 		 * hsnode, give it back to the kmem_cache,
2987c478bd9Sstevel@tonic-gate 		 * else put at the end of this file system's
2997c478bd9Sstevel@tonic-gate 		 * internal free list.
3007c478bd9Sstevel@tonic-gate 		 */
3017c478bd9Sstevel@tonic-gate 		nopage = !vn_has_cached_data(vp);
3027c478bd9Sstevel@tonic-gate 		hp->hs_flags = 0;
3037c478bd9Sstevel@tonic-gate 		/*
3047c478bd9Sstevel@tonic-gate 		 * exit these locks now, since hs_freenode may
3057c478bd9Sstevel@tonic-gate 		 * kmem_free the hsnode and embedded vnode
3067c478bd9Sstevel@tonic-gate 		 */
3077c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
3087c478bd9Sstevel@tonic-gate 		mutex_exit(&hp->hs_contents_lock);
3097c478bd9Sstevel@tonic-gate 		hs_freenode(vp, fsp, nopage);
3107c478bd9Sstevel@tonic-gate 	} else {
3117c478bd9Sstevel@tonic-gate 		mutex_exit(&vp->v_lock);
3127c478bd9Sstevel@tonic-gate 		mutex_exit(&hp->hs_contents_lock);
3137c478bd9Sstevel@tonic-gate 	}
3147c478bd9Sstevel@tonic-gate 	rw_exit(&fsp->hsfs_hash_lock);
3157c478bd9Sstevel@tonic-gate }
3167c478bd9Sstevel@tonic-gate 
3177c478bd9Sstevel@tonic-gate 
3187c478bd9Sstevel@tonic-gate /*ARGSUSED*/
3197c478bd9Sstevel@tonic-gate static int
3207c478bd9Sstevel@tonic-gate hsfs_lookup(
3217c478bd9Sstevel@tonic-gate 	struct vnode *dvp,
3227c478bd9Sstevel@tonic-gate 	char *nm,
3237c478bd9Sstevel@tonic-gate 	struct vnode **vpp,
3247c478bd9Sstevel@tonic-gate 	struct pathname *pnp,
3257c478bd9Sstevel@tonic-gate 	int flags,
3267c478bd9Sstevel@tonic-gate 	struct vnode *rdir,
3277c478bd9Sstevel@tonic-gate 	struct cred *cred)
3287c478bd9Sstevel@tonic-gate {
3297c478bd9Sstevel@tonic-gate 	int error;
3307c478bd9Sstevel@tonic-gate 	int namelen = (int)strlen(nm);
3317c478bd9Sstevel@tonic-gate 
3327c478bd9Sstevel@tonic-gate 	if (*nm == '\0') {
3337c478bd9Sstevel@tonic-gate 		VN_HOLD(dvp);
3347c478bd9Sstevel@tonic-gate 		*vpp = dvp;
3357c478bd9Sstevel@tonic-gate 		return (0);
3367c478bd9Sstevel@tonic-gate 	}
3377c478bd9Sstevel@tonic-gate 
3387c478bd9Sstevel@tonic-gate 	/*
3397c478bd9Sstevel@tonic-gate 	 * If we're looking for ourself, life is simple.
3407c478bd9Sstevel@tonic-gate 	 */
3417c478bd9Sstevel@tonic-gate 	if (namelen == 1 && *nm == '.') {
3427c478bd9Sstevel@tonic-gate 		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
3437c478bd9Sstevel@tonic-gate 			return (error);
3447c478bd9Sstevel@tonic-gate 		VN_HOLD(dvp);
3457c478bd9Sstevel@tonic-gate 		*vpp = dvp;
3467c478bd9Sstevel@tonic-gate 		return (0);
3477c478bd9Sstevel@tonic-gate 	}
3487c478bd9Sstevel@tonic-gate 
3497c478bd9Sstevel@tonic-gate 	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
3507c478bd9Sstevel@tonic-gate }
3517c478bd9Sstevel@tonic-gate 
3527c478bd9Sstevel@tonic-gate 
3537c478bd9Sstevel@tonic-gate /*ARGSUSED*/
3547c478bd9Sstevel@tonic-gate static int
3557c478bd9Sstevel@tonic-gate hsfs_readdir(
3567c478bd9Sstevel@tonic-gate 	struct vnode	*vp,
3577c478bd9Sstevel@tonic-gate 	struct uio	*uiop,
3587c478bd9Sstevel@tonic-gate 	struct cred	*cred,
3597c478bd9Sstevel@tonic-gate 	int		*eofp)
3607c478bd9Sstevel@tonic-gate {
3617c478bd9Sstevel@tonic-gate 	struct hsnode	*dhp;
3627c478bd9Sstevel@tonic-gate 	struct hsfs	*fsp;
3637c478bd9Sstevel@tonic-gate 	struct hs_direntry hd;
3647c478bd9Sstevel@tonic-gate 	struct dirent64	*nd;
3657c478bd9Sstevel@tonic-gate 	int		error;
3667c478bd9Sstevel@tonic-gate 	uint_t		offset;		/* real offset in directory */
3677c478bd9Sstevel@tonic-gate 	uint_t		dirsiz;		/* real size of directory */
3687c478bd9Sstevel@tonic-gate 	uchar_t		*blkp;
3697c478bd9Sstevel@tonic-gate 	int		hdlen;		/* length of hs directory entry */
3707c478bd9Sstevel@tonic-gate 	long		ndlen;		/* length of dirent entry */
3717c478bd9Sstevel@tonic-gate 	int		bytes_wanted;
3727c478bd9Sstevel@tonic-gate 	size_t		bufsize;	/* size of dirent buffer */
3737c478bd9Sstevel@tonic-gate 	char		*outbuf;	/* ptr to dirent buffer */
3747c478bd9Sstevel@tonic-gate 	char		*dname;
3757c478bd9Sstevel@tonic-gate 	int		dnamelen;
3767c478bd9Sstevel@tonic-gate 	size_t		dname_size;
3777c478bd9Sstevel@tonic-gate 	struct fbuf	*fbp;
3787c478bd9Sstevel@tonic-gate 	uint_t		last_offset;	/* last index into current dir block */
3797c478bd9Sstevel@tonic-gate 	ulong_t		dir_lbn;	/* lbn of directory */
3807c478bd9Sstevel@tonic-gate 	ino64_t		dirino;	/* temporary storage before storing in dirent */
3817c478bd9Sstevel@tonic-gate 	off_t		diroff;
3827c478bd9Sstevel@tonic-gate 
3837c478bd9Sstevel@tonic-gate 	dhp = VTOH(vp);
3847c478bd9Sstevel@tonic-gate 	fsp = VFS_TO_HSFS(vp->v_vfsp);
3857c478bd9Sstevel@tonic-gate 	if (dhp->hs_dirent.ext_size == 0)
3867c478bd9Sstevel@tonic-gate 		hs_filldirent(vp, &dhp->hs_dirent);
3877c478bd9Sstevel@tonic-gate 	dirsiz = dhp->hs_dirent.ext_size;
3887c478bd9Sstevel@tonic-gate 	dir_lbn = dhp->hs_dirent.ext_lbn;
3897c478bd9Sstevel@tonic-gate 	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
3907c478bd9Sstevel@tonic-gate 		if (eofp)
3917c478bd9Sstevel@tonic-gate 			*eofp = 1;
3927c478bd9Sstevel@tonic-gate 		return (0);
3937c478bd9Sstevel@tonic-gate 	}
3949cbc422eSpeterte 	ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
3959cbc422eSpeterte 	offset = uiop->uio_loffset;
3967c478bd9Sstevel@tonic-gate 
3977c478bd9Sstevel@tonic-gate 	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
3987c478bd9Sstevel@tonic-gate 	dname = kmem_alloc(dname_size, KM_SLEEP);
3997c478bd9Sstevel@tonic-gate 	bufsize = uiop->uio_resid + sizeof (struct dirent64);
4007c478bd9Sstevel@tonic-gate 
4017c478bd9Sstevel@tonic-gate 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
4027c478bd9Sstevel@tonic-gate 	nd = (struct dirent64 *)outbuf;
4037c478bd9Sstevel@tonic-gate 
4047c478bd9Sstevel@tonic-gate 	while (offset < dirsiz) {
405cf83459aSfrankho 		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
4067c478bd9Sstevel@tonic-gate 
4077c478bd9Sstevel@tonic-gate 		error = fbread(vp, (offset_t)(offset & MAXBMASK),
4087c478bd9Sstevel@tonic-gate 			(unsigned int)bytes_wanted, S_READ, &fbp);
4097c478bd9Sstevel@tonic-gate 		if (error)
4107c478bd9Sstevel@tonic-gate 			goto done;
4117c478bd9Sstevel@tonic-gate 
4127c478bd9Sstevel@tonic-gate 		blkp = (uchar_t *)fbp->fb_addr;
413cf83459aSfrankho 		last_offset = (offset & MAXBMASK) + fbp->fb_count;
4147c478bd9Sstevel@tonic-gate 
4157c478bd9Sstevel@tonic-gate #define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */
4167c478bd9Sstevel@tonic-gate 
4177c478bd9Sstevel@tonic-gate 		while (offset < last_offset) {
4187c478bd9Sstevel@tonic-gate 			/*
419cf83459aSfrankho 			 * Very similar validation code is found in
420cf83459aSfrankho 			 * process_dirblock(), hsfs_node.c.
421cf83459aSfrankho 			 * For an explanation, see there.
422cf83459aSfrankho 			 * It may make sense for the future to
423cf83459aSfrankho 			 * "consolidate" the code in hs_parsedir(),
424cf83459aSfrankho 			 * process_dirblock() and hsfs_readdir() into
425cf83459aSfrankho 			 * a single utility function.
4267c478bd9Sstevel@tonic-gate 			 */
4277c478bd9Sstevel@tonic-gate 			hdlen = (int)((uchar_t)
4287c478bd9Sstevel@tonic-gate 				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
429cf83459aSfrankho 			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
430cf83459aSfrankho 			    offset + hdlen > last_offset) {
4317c478bd9Sstevel@tonic-gate 				/*
432cf83459aSfrankho 				 * advance to next sector boundary
4337c478bd9Sstevel@tonic-gate 				 */
434cf83459aSfrankho 				offset = roundup(offset + 1, HS_SECTOR_SIZE);
435cf83459aSfrankho 				if (hdlen)
436cf83459aSfrankho 					hs_log_bogus_disk_warning(fsp,
437cf83459aSfrankho 					    HSFS_ERR_TRAILING_JUNK, 0);
438cf83459aSfrankho 
439cf83459aSfrankho 				continue;
4407c478bd9Sstevel@tonic-gate 			}
4417c478bd9Sstevel@tonic-gate 
4427c478bd9Sstevel@tonic-gate 			bzero(&hd, sizeof (hd));
4437c478bd9Sstevel@tonic-gate 
4447c478bd9Sstevel@tonic-gate 			/*
4457c478bd9Sstevel@tonic-gate 			 * Just ignore invalid directory entries.
4467c478bd9Sstevel@tonic-gate 			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
4477c478bd9Sstevel@tonic-gate 			 */
4487c478bd9Sstevel@tonic-gate 			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
449fc1c62b8Sfrankho 				&hd, dname, &dnamelen,
450fc1c62b8Sfrankho 					last_offset - rel_offset(offset))) {
4517c478bd9Sstevel@tonic-gate 				/*
4527c478bd9Sstevel@tonic-gate 				 * Determine if there is enough room
4537c478bd9Sstevel@tonic-gate 				 */
4547c478bd9Sstevel@tonic-gate 				ndlen = (long)DIRENT64_RECLEN((dnamelen));
4557c478bd9Sstevel@tonic-gate 
4567c478bd9Sstevel@tonic-gate 				if ((ndlen + ((char *)nd - outbuf)) >
4577c478bd9Sstevel@tonic-gate 				    uiop->uio_resid) {
4587c478bd9Sstevel@tonic-gate 					fbrelse(fbp, S_READ);
4597c478bd9Sstevel@tonic-gate 					goto done; /* output buffer full */
4607c478bd9Sstevel@tonic-gate 				}
4617c478bd9Sstevel@tonic-gate 
4627c478bd9Sstevel@tonic-gate 				diroff = offset + hdlen;
4637c478bd9Sstevel@tonic-gate 				/*
4647c478bd9Sstevel@tonic-gate 				 * Generate nodeid.
4657c478bd9Sstevel@tonic-gate 				 * If a directory, nodeid points to the
4667c478bd9Sstevel@tonic-gate 				 * canonical dirent describing the directory:
4677c478bd9Sstevel@tonic-gate 				 * the dirent of the "." entry for the
4687c478bd9Sstevel@tonic-gate 				 * directory, which is pointed to by all
4697c478bd9Sstevel@tonic-gate 				 * dirents for that directory.
4707c478bd9Sstevel@tonic-gate 				 * Otherwise, nodeid points to dirent of file.
4717c478bd9Sstevel@tonic-gate 				 */
4727c478bd9Sstevel@tonic-gate 				if (hd.type == VDIR) {
4737c478bd9Sstevel@tonic-gate 					dirino = (ino64_t)
4747c478bd9Sstevel@tonic-gate 					    MAKE_NODEID(hd.ext_lbn, 0,
4757c478bd9Sstevel@tonic-gate 					    vp->v_vfsp);
4767c478bd9Sstevel@tonic-gate 				} else {
4777c478bd9Sstevel@tonic-gate 					struct hs_volume *hvp;
4787c478bd9Sstevel@tonic-gate 					offset_t lbn, off;
4797c478bd9Sstevel@tonic-gate 
4807c478bd9Sstevel@tonic-gate 					/*
4817c478bd9Sstevel@tonic-gate 					 * Normalize lbn and off
4827c478bd9Sstevel@tonic-gate 					 */
4837c478bd9Sstevel@tonic-gate 					hvp = &fsp->hsfs_vol;
4847c478bd9Sstevel@tonic-gate 					lbn = dir_lbn +
4857c478bd9Sstevel@tonic-gate 					    (offset >> hvp->lbn_shift);
4867c478bd9Sstevel@tonic-gate 					off = offset & hvp->lbn_maxoffset;
4877c478bd9Sstevel@tonic-gate 					dirino = (ino64_t)MAKE_NODEID(lbn,
4887c478bd9Sstevel@tonic-gate 					    off, vp->v_vfsp);
4897c478bd9Sstevel@tonic-gate 				}
4907c478bd9Sstevel@tonic-gate 
4917c478bd9Sstevel@tonic-gate 
4927c478bd9Sstevel@tonic-gate 				/* strncpy(9f) will zero uninitialized bytes */
4937c478bd9Sstevel@tonic-gate 
4947c478bd9Sstevel@tonic-gate 				ASSERT(strlen(dname) + 1 <=
4957c478bd9Sstevel@tonic-gate 				    DIRENT64_NAMELEN(ndlen));
4967c478bd9Sstevel@tonic-gate 				(void) strncpy(nd->d_name, dname,
4977c478bd9Sstevel@tonic-gate 				    DIRENT64_NAMELEN(ndlen));
4987c478bd9Sstevel@tonic-gate 				nd->d_reclen = (ushort_t)ndlen;
4997c478bd9Sstevel@tonic-gate 				nd->d_off = (offset_t)diroff;
5007c478bd9Sstevel@tonic-gate 				nd->d_ino = dirino;
5017c478bd9Sstevel@tonic-gate 				nd = (struct dirent64 *)((char *)nd + ndlen);
5027c478bd9Sstevel@tonic-gate 
5037c478bd9Sstevel@tonic-gate 				/*
5047c478bd9Sstevel@tonic-gate 				 * free up space allocated for symlink
5057c478bd9Sstevel@tonic-gate 				 */
5067c478bd9Sstevel@tonic-gate 				if (hd.sym_link != (char *)NULL) {
5077c478bd9Sstevel@tonic-gate 					kmem_free(hd.sym_link,
5087c478bd9Sstevel@tonic-gate 					    (size_t)(hd.ext_size+1));
5097c478bd9Sstevel@tonic-gate 					hd.sym_link = (char *)NULL;
5107c478bd9Sstevel@tonic-gate 				}
5117c478bd9Sstevel@tonic-gate 			}
5127c478bd9Sstevel@tonic-gate 			offset += hdlen;
5137c478bd9Sstevel@tonic-gate 		}
5147c478bd9Sstevel@tonic-gate 		fbrelse(fbp, S_READ);
5157c478bd9Sstevel@tonic-gate 	}
5167c478bd9Sstevel@tonic-gate 
5177c478bd9Sstevel@tonic-gate 	/*
5187c478bd9Sstevel@tonic-gate 	 * Got here for one of the following reasons:
5197c478bd9Sstevel@tonic-gate 	 *	1) outbuf is full (error == 0)
5207c478bd9Sstevel@tonic-gate 	 *	2) end of directory reached (error == 0)
5217c478bd9Sstevel@tonic-gate 	 *	3) error reading directory sector (error != 0)
5227c478bd9Sstevel@tonic-gate 	 *	4) directory entry crosses sector boundary (error == 0)
5237c478bd9Sstevel@tonic-gate 	 *
5247c478bd9Sstevel@tonic-gate 	 * If any directory entries have been copied, don't report
5257c478bd9Sstevel@tonic-gate 	 * case 4.  Instead, return the valid directory entries.
5267c478bd9Sstevel@tonic-gate 	 *
5277c478bd9Sstevel@tonic-gate 	 * If no entries have been copied, report the error.
5287c478bd9Sstevel@tonic-gate 	 * In case 4, this will be indistinguishable from EOF.
5297c478bd9Sstevel@tonic-gate 	 */
5307c478bd9Sstevel@tonic-gate done:
5317c478bd9Sstevel@tonic-gate 	ndlen = ((char *)nd - outbuf);
5327c478bd9Sstevel@tonic-gate 	if (ndlen != 0) {
5337c478bd9Sstevel@tonic-gate 		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
5349cbc422eSpeterte 		uiop->uio_loffset = offset;
5357c478bd9Sstevel@tonic-gate 	}
5367c478bd9Sstevel@tonic-gate 	kmem_free(dname, dname_size);
5377c478bd9Sstevel@tonic-gate 	kmem_free(outbuf, bufsize);
5387c478bd9Sstevel@tonic-gate 	if (eofp && error == 0)
5399cbc422eSpeterte 		*eofp = (uiop->uio_loffset >= dirsiz);
5407c478bd9Sstevel@tonic-gate 	return (error);
5417c478bd9Sstevel@tonic-gate }
5427c478bd9Sstevel@tonic-gate 
5437c478bd9Sstevel@tonic-gate static int
5447c478bd9Sstevel@tonic-gate hsfs_fid(struct vnode *vp, struct fid *fidp)
5457c478bd9Sstevel@tonic-gate {
5467c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
5477c478bd9Sstevel@tonic-gate 	struct hsfid *fid;
5487c478bd9Sstevel@tonic-gate 
5497c478bd9Sstevel@tonic-gate 	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
5507c478bd9Sstevel@tonic-gate 		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
5517c478bd9Sstevel@tonic-gate 		return (ENOSPC);
5527c478bd9Sstevel@tonic-gate 	}
5537c478bd9Sstevel@tonic-gate 
5547c478bd9Sstevel@tonic-gate 	fid = (struct hsfid *)fidp;
5557c478bd9Sstevel@tonic-gate 	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
5567c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
5577c478bd9Sstevel@tonic-gate 	mutex_enter(&hp->hs_contents_lock);
5587c478bd9Sstevel@tonic-gate 	fid->hf_dir_lbn = hp->hs_dir_lbn;
5597c478bd9Sstevel@tonic-gate 	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
5607c478bd9Sstevel@tonic-gate 	mutex_exit(&hp->hs_contents_lock);
5617c478bd9Sstevel@tonic-gate 	return (0);
5627c478bd9Sstevel@tonic-gate }
5637c478bd9Sstevel@tonic-gate 
5647c478bd9Sstevel@tonic-gate /*ARGSUSED*/
5657c478bd9Sstevel@tonic-gate static int
5667c478bd9Sstevel@tonic-gate hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
5677c478bd9Sstevel@tonic-gate {
5687c478bd9Sstevel@tonic-gate 	return (0);
5697c478bd9Sstevel@tonic-gate }
5707c478bd9Sstevel@tonic-gate 
5717c478bd9Sstevel@tonic-gate /*ARGSUSED*/
5727c478bd9Sstevel@tonic-gate static int
5737c478bd9Sstevel@tonic-gate hsfs_close(
5747c478bd9Sstevel@tonic-gate 	struct vnode *vp,
5757c478bd9Sstevel@tonic-gate 	int flag,
5767c478bd9Sstevel@tonic-gate 	int count,
5777c478bd9Sstevel@tonic-gate 	offset_t offset,
5787c478bd9Sstevel@tonic-gate 	struct cred *cred)
5797c478bd9Sstevel@tonic-gate {
5807c478bd9Sstevel@tonic-gate 	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
5817c478bd9Sstevel@tonic-gate 	cleanshares(vp, ttoproc(curthread)->p_pid);
5827c478bd9Sstevel@tonic-gate 	return (0);
5837c478bd9Sstevel@tonic-gate }
5847c478bd9Sstevel@tonic-gate 
5857c478bd9Sstevel@tonic-gate /*ARGSUSED2*/
5867c478bd9Sstevel@tonic-gate static int
5877c478bd9Sstevel@tonic-gate hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
5887c478bd9Sstevel@tonic-gate {
5897c478bd9Sstevel@tonic-gate 	return (hs_access(vp, (mode_t)mode, cred));
5907c478bd9Sstevel@tonic-gate }
5917c478bd9Sstevel@tonic-gate 
5927c478bd9Sstevel@tonic-gate /*
5937c478bd9Sstevel@tonic-gate  * The seek time of a CD-ROM is very slow, and the data transfer
5947c478bd9Sstevel@tonic-gate  * rate is even worse (max. 150K per sec).  The design
5957c478bd9Sstevel@tonic-gate  * decision is to reduce access to the CD-ROM as much as possible,
5967c478bd9Sstevel@tonic-gate  * and to transfer a sizable block (read-ahead) of data at a time.
5977c478bd9Sstevel@tonic-gate  * The UFS style of reading ahead one block at a time is not
5987c478bd9Sstevel@tonic-gate  * appropriate here, and is not supported.
5997c478bd9Sstevel@tonic-gate  */
6007c478bd9Sstevel@tonic-gate 
6017c478bd9Sstevel@tonic-gate /*
6027c478bd9Sstevel@tonic-gate  * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
6037c478bd9Sstevel@tonic-gate  */
6047c478bd9Sstevel@tonic-gate #define	KLUSTSIZE	(56 * 1024)
6057c478bd9Sstevel@tonic-gate /* we don't support read ahead */
6067c478bd9Sstevel@tonic-gate int hsfs_lostpage;	/* no. of times we lost original page */
6077c478bd9Sstevel@tonic-gate 
6087c478bd9Sstevel@tonic-gate /*
6097c478bd9Sstevel@tonic-gate  * Used to prevent biodone() from releasing buf resources that
6107c478bd9Sstevel@tonic-gate  * we didn't allocate in quite the usual way.
6117c478bd9Sstevel@tonic-gate  */
6127c478bd9Sstevel@tonic-gate /*ARGSUSED*/
6137c478bd9Sstevel@tonic-gate int
6147c478bd9Sstevel@tonic-gate hsfs_iodone(struct buf *bp)
6157c478bd9Sstevel@tonic-gate {
6167c478bd9Sstevel@tonic-gate 	sema_v(&bp->b_io);
6177c478bd9Sstevel@tonic-gate 	return (0);
6187c478bd9Sstevel@tonic-gate }
6197c478bd9Sstevel@tonic-gate 
6207c478bd9Sstevel@tonic-gate /*
6217c478bd9Sstevel@tonic-gate  * Each file may have a different interleaving on disk.  This makes
6227c478bd9Sstevel@tonic-gate  * things somewhat interesting.  The gist is that there are some
6237c478bd9Sstevel@tonic-gate  * number of contiguous data sectors, followed by some other number
6247c478bd9Sstevel@tonic-gate  * of contiguous skip sectors.  The sum of those two sets of sectors
6257c478bd9Sstevel@tonic-gate  * defines the interleave size.  Unfortunately, it means that we generally
6267c478bd9Sstevel@tonic-gate  * can't simply read N sectors starting at a given offset to satisfy
6277c478bd9Sstevel@tonic-gate  * any given request.
6287c478bd9Sstevel@tonic-gate  *
6297c478bd9Sstevel@tonic-gate  * What we do is get the relevant memory pages via pvn_read_kluster(),
6307c478bd9Sstevel@tonic-gate  * then stride through the interleaves, setting up a buf for each
6317c478bd9Sstevel@tonic-gate  * sector that needs to be brought in.  Instead of kmem_alloc'ing
6327c478bd9Sstevel@tonic-gate  * space for the sectors, though, we just point at the appropriate
6337c478bd9Sstevel@tonic-gate  * spot in the relevant page for each of them.  This saves us a bunch
6347c478bd9Sstevel@tonic-gate  * of copying.
6357c478bd9Sstevel@tonic-gate  */
6367c478bd9Sstevel@tonic-gate /*ARGSUSED*/
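/*
 * A worked example (illustrative values, not from any particular disc):
 * with intlf_sz = 2, intlf_sk = 1 and 2048-byte logical blocks, each
 * interleave holds 4096 data bytes followed by one 2048-byte skip
 * sector, so chunk_lbn_count = 3 and chunk_data_bytes = 4096.  File
 * byte offset 5000 then falls in data chunk 1 (which_chunk_lbn = 1),
 * which starts at logical block 3 of the extent (offset_lbn = 3), so
 * that byte lives 3 * 2048 + (5000 % 4096) = 7048 bytes past the start
 * of the extent (after the XAR area, if any).
 */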
6377c478bd9Sstevel@tonic-gate static int
6387c478bd9Sstevel@tonic-gate hsfs_getapage(
6397c478bd9Sstevel@tonic-gate 	struct vnode *vp,
6407c478bd9Sstevel@tonic-gate 	u_offset_t off,
6417c478bd9Sstevel@tonic-gate 	size_t len,
6427c478bd9Sstevel@tonic-gate 	uint_t *protp,
6437c478bd9Sstevel@tonic-gate 	struct page *pl[],
6447c478bd9Sstevel@tonic-gate 	size_t plsz,
6457c478bd9Sstevel@tonic-gate 	struct seg *seg,
6467c478bd9Sstevel@tonic-gate 	caddr_t addr,
6477c478bd9Sstevel@tonic-gate 	enum seg_rw rw,
6487c478bd9Sstevel@tonic-gate 	struct cred *cred)
6497c478bd9Sstevel@tonic-gate {
6507c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
6517c478bd9Sstevel@tonic-gate 	struct hsfs *fsp;
6527c478bd9Sstevel@tonic-gate 	int	err;
6537c478bd9Sstevel@tonic-gate 	struct buf *bufs;
6547c478bd9Sstevel@tonic-gate 	caddr_t *vas;
6557c478bd9Sstevel@tonic-gate 	caddr_t va;
6567c478bd9Sstevel@tonic-gate 	struct page *pp, *searchp, *lastp;
6577c478bd9Sstevel@tonic-gate 	page_t	*pagefound;
6587c478bd9Sstevel@tonic-gate 	offset_t	bof;
6597c478bd9Sstevel@tonic-gate 	struct vnode *devvp;
6607c478bd9Sstevel@tonic-gate 	ulong_t	byte_offset;
6617c478bd9Sstevel@tonic-gate 	size_t	io_len_tmp;
6627c478bd9Sstevel@tonic-gate 	uint_t	io_off, io_len;
6637c478bd9Sstevel@tonic-gate 	uint_t	xlen;
6647c478bd9Sstevel@tonic-gate 	uint_t	filsiz;
6657c478bd9Sstevel@tonic-gate 	uint_t	secsize;
6667c478bd9Sstevel@tonic-gate 	uint_t	bufcnt;
6677c478bd9Sstevel@tonic-gate 	uint_t	bufsused;
6687c478bd9Sstevel@tonic-gate 	uint_t	count;
6697c478bd9Sstevel@tonic-gate 	uint_t	io_end;
6707c478bd9Sstevel@tonic-gate 	uint_t	which_chunk_lbn;
6717c478bd9Sstevel@tonic-gate 	uint_t	offset_lbn;
6727c478bd9Sstevel@tonic-gate 	uint_t	offset_extra;
6737c478bd9Sstevel@tonic-gate 	offset_t	offset_bytes;
6747c478bd9Sstevel@tonic-gate 	uint_t	remaining_bytes;
6757c478bd9Sstevel@tonic-gate 	uint_t	extension;
6767c478bd9Sstevel@tonic-gate 	int	remainder;	/* must be signed */
6777c478bd9Sstevel@tonic-gate 	int	chunk_lbn_count;
6787c478bd9Sstevel@tonic-gate 	int	chunk_data_bytes;
6797c478bd9Sstevel@tonic-gate 	int	xarsiz;
6807c478bd9Sstevel@tonic-gate 	diskaddr_t driver_block;
6817c478bd9Sstevel@tonic-gate 	u_offset_t io_off_tmp;
6827c478bd9Sstevel@tonic-gate 
6837c478bd9Sstevel@tonic-gate 	/*
6847c478bd9Sstevel@tonic-gate 	 * We don't support asynchronous operation at the moment, so
6857c478bd9Sstevel@tonic-gate 	 * just pretend we did it.  If the pages are ever actually
6867c478bd9Sstevel@tonic-gate 	 * needed, they'll get brought in then.
6877c478bd9Sstevel@tonic-gate 	 */
6887c478bd9Sstevel@tonic-gate 	if (pl == NULL)
6897c478bd9Sstevel@tonic-gate 		return (0);
6907c478bd9Sstevel@tonic-gate 
6917c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
6927c478bd9Sstevel@tonic-gate 	fsp = VFS_TO_HSFS(vp->v_vfsp);
6937c478bd9Sstevel@tonic-gate 	devvp = fsp->hsfs_devvp;
6947c478bd9Sstevel@tonic-gate 	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */
6957c478bd9Sstevel@tonic-gate 
6967c478bd9Sstevel@tonic-gate 	/* file data size */
6977c478bd9Sstevel@tonic-gate 	filsiz = hp->hs_dirent.ext_size;
6987c478bd9Sstevel@tonic-gate 
6997c478bd9Sstevel@tonic-gate 	/* disk addr for start of file */
7007c478bd9Sstevel@tonic-gate 	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
7017c478bd9Sstevel@tonic-gate 
7027c478bd9Sstevel@tonic-gate 	/* xarsiz byte must be skipped for data */
7037c478bd9Sstevel@tonic-gate 	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
7047c478bd9Sstevel@tonic-gate 
7057c478bd9Sstevel@tonic-gate 	/* how many logical blocks in an interleave (data+skip) */
7067c478bd9Sstevel@tonic-gate 	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
7077c478bd9Sstevel@tonic-gate 
7087c478bd9Sstevel@tonic-gate 	if (chunk_lbn_count == 0) {
7097c478bd9Sstevel@tonic-gate 		chunk_lbn_count = 1;
7107c478bd9Sstevel@tonic-gate 	}
7117c478bd9Sstevel@tonic-gate 
7127c478bd9Sstevel@tonic-gate 	/*
7137c478bd9Sstevel@tonic-gate 	 * Convert interleaving size into bytes.  The zero case
7147c478bd9Sstevel@tonic-gate 	 * (no interleaving) optimization is handled as a side-
7157c478bd9Sstevel@tonic-gate 	 * effect of the read-ahead logic.
7167c478bd9Sstevel@tonic-gate 	 */
7177c478bd9Sstevel@tonic-gate 	if (hp->hs_dirent.intlf_sz == 0) {
7187c478bd9Sstevel@tonic-gate 		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
7197c478bd9Sstevel@tonic-gate 	} else {
7207c478bd9Sstevel@tonic-gate 		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
7217c478bd9Sstevel@tonic-gate 			vp->v_vfsp);
7227c478bd9Sstevel@tonic-gate 	}
7237c478bd9Sstevel@tonic-gate 
7247c478bd9Sstevel@tonic-gate reread:
7257c478bd9Sstevel@tonic-gate 	err = 0;
7267c478bd9Sstevel@tonic-gate 	pagefound = 0;
7277c478bd9Sstevel@tonic-gate 
7287c478bd9Sstevel@tonic-gate 	/*
7297c478bd9Sstevel@tonic-gate 	 * Do some read-ahead.  This mostly saves us a bit of
7307c478bd9Sstevel@tonic-gate 	 * system CPU time when doing sequential reads.  At some
7317c478bd9Sstevel@tonic-gate 	 * point, we could do the read-ahead asynchronously, which
7327c478bd9Sstevel@tonic-gate 	 * might gain us something on wall time, but it seems
7337c478bd9Sstevel@tonic-gate 	 * unlikely....
7347c478bd9Sstevel@tonic-gate 	 *
7357c478bd9Sstevel@tonic-gate 	 * We do the easy case here, which is to read through
7367c478bd9Sstevel@tonic-gate 	 * the end of the chunk, minus whatever's at the end that
7377c478bd9Sstevel@tonic-gate 	 * won't exactly fill a page.
7387c478bd9Sstevel@tonic-gate 	 */
7397c478bd9Sstevel@tonic-gate 	which_chunk_lbn = (off + len) / chunk_data_bytes;
7407c478bd9Sstevel@tonic-gate 	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
7417c478bd9Sstevel@tonic-gate 	extension -= (extension % PAGESIZE);
7427c478bd9Sstevel@tonic-gate 	if (extension != 0 && extension < filsiz - off) {
7437c478bd9Sstevel@tonic-gate 		len = extension;
7448cd7c4fcSpeterte 	} else {
7458cd7c4fcSpeterte 		len = PAGESIZE;
7467c478bd9Sstevel@tonic-gate 	}
7477c478bd9Sstevel@tonic-gate 	/*
7487c478bd9Sstevel@tonic-gate 	 * Some CD writers don't write sectors that aren't used.  Also,
7497c478bd9Sstevel@tonic-gate 	 * there's no point in reading sectors we'll never look at.  So,
7507c478bd9Sstevel@tonic-gate 	 * if we're asked to go beyond the end of a file, truncate to the
7517c478bd9Sstevel@tonic-gate 	 * length of that file.
7527c478bd9Sstevel@tonic-gate 	 *
7537c478bd9Sstevel@tonic-gate 	 * Additionally, this behaviour is required by section 6.4.5 of
7547c478bd9Sstevel@tonic-gate 	 * ISO 9660:1988(E).
7557c478bd9Sstevel@tonic-gate 	 */
7567c478bd9Sstevel@tonic-gate 	if (len > (filsiz - off)) {
7577c478bd9Sstevel@tonic-gate 		len = filsiz - off;
7587c478bd9Sstevel@tonic-gate 	}
7597c478bd9Sstevel@tonic-gate 
7608cd7c4fcSpeterte 	/* A little paranoia. */
7618cd7c4fcSpeterte 	ASSERT(len > 0);
7628cd7c4fcSpeterte 
7637c478bd9Sstevel@tonic-gate 	/*
7647c478bd9Sstevel@tonic-gate 	 * After all that, make sure we're asking for things in units
7657c478bd9Sstevel@tonic-gate 	 * that bdev_strategy() will understand (see bug 4202551).
7667c478bd9Sstevel@tonic-gate 	 */
7677c478bd9Sstevel@tonic-gate 	len = roundup(len, DEV_BSIZE);
7687c478bd9Sstevel@tonic-gate 
7697c478bd9Sstevel@tonic-gate 	pp = NULL;
7707c478bd9Sstevel@tonic-gate again:
7717c478bd9Sstevel@tonic-gate 	/* search for page in buffer */
7727c478bd9Sstevel@tonic-gate 	if ((pagefound = page_exists(vp, off)) == 0) {
7737c478bd9Sstevel@tonic-gate 		/*
7747c478bd9Sstevel@tonic-gate 		 * Need to really do disk IO to get the page.
7757c478bd9Sstevel@tonic-gate 		 */
7767c478bd9Sstevel@tonic-gate 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
7777c478bd9Sstevel@tonic-gate 		    &io_len_tmp, off, len, 0);
7787c478bd9Sstevel@tonic-gate 
7797c478bd9Sstevel@tonic-gate 		if (pp == NULL)
7807c478bd9Sstevel@tonic-gate 			goto again;
7817c478bd9Sstevel@tonic-gate 
7827c478bd9Sstevel@tonic-gate 		io_off = (uint_t)io_off_tmp;
7837c478bd9Sstevel@tonic-gate 		io_len = (uint_t)io_len_tmp;
7847c478bd9Sstevel@tonic-gate 
7857c478bd9Sstevel@tonic-gate 		/* check for truncation */
7867c478bd9Sstevel@tonic-gate 		/*
7877c478bd9Sstevel@tonic-gate 		 * xxx Clean up and return EIO instead?
7887c478bd9Sstevel@tonic-gate 		 * xxx Ought to go to u_offset_t for everything, but we
7897c478bd9Sstevel@tonic-gate 		 * xxx call lots of things that want uint_t arguments.
7907c478bd9Sstevel@tonic-gate 		 */
7917c478bd9Sstevel@tonic-gate 		ASSERT(io_off == io_off_tmp);
7927c478bd9Sstevel@tonic-gate 
7937c478bd9Sstevel@tonic-gate 		/*
7947c478bd9Sstevel@tonic-gate 		 * get enough buffers for worst-case scenario
7957c478bd9Sstevel@tonic-gate 		 * (i.e., no coalescing possible).
7967c478bd9Sstevel@tonic-gate 		 */
7977c478bd9Sstevel@tonic-gate 		bufcnt = (len + secsize - 1) / secsize;
7987c478bd9Sstevel@tonic-gate 		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
7997c478bd9Sstevel@tonic-gate 		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
8007c478bd9Sstevel@tonic-gate 		for (count = 0; count < bufcnt; count++) {
8017c478bd9Sstevel@tonic-gate 			bufs[count].b_edev = devvp->v_rdev;
8027c478bd9Sstevel@tonic-gate 			bufs[count].b_dev = cmpdev(devvp->v_rdev);
8037c478bd9Sstevel@tonic-gate 			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
8047c478bd9Sstevel@tonic-gate 			bufs[count].b_iodone = hsfs_iodone;
8057c478bd9Sstevel@tonic-gate 			bufs[count].b_vp = vp;
8067c478bd9Sstevel@tonic-gate 			bufs[count].b_file = vp;
8077c478bd9Sstevel@tonic-gate 			sema_init(&bufs[count].b_io, 0, NULL,
8087c478bd9Sstevel@tonic-gate 			    SEMA_DEFAULT, NULL);
8097c478bd9Sstevel@tonic-gate 			sema_init(&bufs[count].b_sem, 0, NULL,
8107c478bd9Sstevel@tonic-gate 			    SEMA_DEFAULT, NULL);
8117c478bd9Sstevel@tonic-gate 		}
8127c478bd9Sstevel@tonic-gate 
8138cd7c4fcSpeterte 		/*
8148cd7c4fcSpeterte 		 * If our filesize is not an integer multiple of PAGESIZE,
8158cd7c4fcSpeterte 		 * we zero that part of the last page that's between EOF and
8168cd7c4fcSpeterte 		 * the PAGESIZE boundary.
8178cd7c4fcSpeterte 		 */
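		/*
		 * For instance, with 4K pages (an illustrative page
		 * size), an io_len that ends 2048 bytes into its last
		 * page gives xlen = 2048 below, and the trailing 2048
		 * bytes of that page are zeroed.
		 */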
8187c478bd9Sstevel@tonic-gate 		xlen = io_len & PAGEOFFSET;
8197c478bd9Sstevel@tonic-gate 		if (xlen != 0)
8207c478bd9Sstevel@tonic-gate 			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
8217c478bd9Sstevel@tonic-gate 
8227c478bd9Sstevel@tonic-gate 		va = NULL;
8237c478bd9Sstevel@tonic-gate 		lastp = NULL;
8247c478bd9Sstevel@tonic-gate 		searchp = pp;
8257c478bd9Sstevel@tonic-gate 		io_end = io_off + io_len;
8267c478bd9Sstevel@tonic-gate 		for (count = 0, byte_offset = io_off;
8277c478bd9Sstevel@tonic-gate 			byte_offset < io_end;
8287c478bd9Sstevel@tonic-gate 			count++) {
8297c478bd9Sstevel@tonic-gate 			ASSERT(count < bufcnt);
8307c478bd9Sstevel@tonic-gate 
8317c478bd9Sstevel@tonic-gate 			/* Compute disk address for interleaving. */
8327c478bd9Sstevel@tonic-gate 
8337c478bd9Sstevel@tonic-gate 			/* considered without skips */
8347c478bd9Sstevel@tonic-gate 			which_chunk_lbn = byte_offset / chunk_data_bytes;
8357c478bd9Sstevel@tonic-gate 
8367c478bd9Sstevel@tonic-gate 			/* factor in skips */
8377c478bd9Sstevel@tonic-gate 			offset_lbn = which_chunk_lbn * chunk_lbn_count;
8387c478bd9Sstevel@tonic-gate 
8397c478bd9Sstevel@tonic-gate 			/* convert to physical byte offset for lbn */
8407c478bd9Sstevel@tonic-gate 			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
8417c478bd9Sstevel@tonic-gate 
8427c478bd9Sstevel@tonic-gate 			/* don't forget offset into lbn */
8437c478bd9Sstevel@tonic-gate 			offset_extra = byte_offset % chunk_data_bytes;
8447c478bd9Sstevel@tonic-gate 
8457c478bd9Sstevel@tonic-gate 			/* get virtual block number for driver */
8467c478bd9Sstevel@tonic-gate 			driver_block = lbtodb(bof + xarsiz
8477c478bd9Sstevel@tonic-gate 				+ offset_bytes + offset_extra);
8487c478bd9Sstevel@tonic-gate 
8497c478bd9Sstevel@tonic-gate 			if (lastp != searchp) {
8507c478bd9Sstevel@tonic-gate 				/* this branch taken first time through loop */
8517c478bd9Sstevel@tonic-gate 				va = vas[count]
8527c478bd9Sstevel@tonic-gate 					= ppmapin(searchp, PROT_WRITE,
8537c478bd9Sstevel@tonic-gate 						(caddr_t)-1);
8547c478bd9Sstevel@tonic-gate 				/* ppmapin() guarantees not to return NULL */
8557c478bd9Sstevel@tonic-gate 			} else {
8567c478bd9Sstevel@tonic-gate 				vas[count] = NULL;
8577c478bd9Sstevel@tonic-gate 			}
8587c478bd9Sstevel@tonic-gate 
8597c478bd9Sstevel@tonic-gate 			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
8607c478bd9Sstevel@tonic-gate 			bufs[count].b_offset =
8617c478bd9Sstevel@tonic-gate 			    (offset_t)(byte_offset - io_off + off);
8627c478bd9Sstevel@tonic-gate 
8637c478bd9Sstevel@tonic-gate 			/*
8647c478bd9Sstevel@tonic-gate 			 * We specifically use the b_lblkno member here,
8657c478bd9Sstevel@tonic-gate 			 * as even in the 32-bit world driver_block can
8667c478bd9Sstevel@tonic-gate 			 * get very large, in line with the ISO 9660 spec.
8677c478bd9Sstevel@tonic-gate 			 */
8687c478bd9Sstevel@tonic-gate 
8697c478bd9Sstevel@tonic-gate 			bufs[count].b_lblkno = driver_block;
8707c478bd9Sstevel@tonic-gate 
8717c478bd9Sstevel@tonic-gate 			remaining_bytes = ((which_chunk_lbn + 1)
8727c478bd9Sstevel@tonic-gate 				* chunk_data_bytes)
8737c478bd9Sstevel@tonic-gate 				- byte_offset;
8747c478bd9Sstevel@tonic-gate 
8757c478bd9Sstevel@tonic-gate 			/*
8767c478bd9Sstevel@tonic-gate 			 * remaining_bytes can't be zero, as we derived
8777c478bd9Sstevel@tonic-gate 			 * which_chunk_lbn directly from byte_offset.
8787c478bd9Sstevel@tonic-gate 			 */
8799cbc422eSpeterte 			if ((remaining_bytes + byte_offset) < (off + len)) {
8807c478bd9Sstevel@tonic-gate 				/* coalesce-read the rest of the chunk */
8817c478bd9Sstevel@tonic-gate 				bufs[count].b_bcount = remaining_bytes;
8827c478bd9Sstevel@tonic-gate 			} else {
8837c478bd9Sstevel@tonic-gate 				/* get the final bits */
8847c478bd9Sstevel@tonic-gate 				bufs[count].b_bcount = off + len - byte_offset;
8857c478bd9Sstevel@tonic-gate 			}
8867c478bd9Sstevel@tonic-gate 
8877c478bd9Sstevel@tonic-gate 			/*
8887c478bd9Sstevel@tonic-gate 			 * It would be nice to do multiple pages'
8897c478bd9Sstevel@tonic-gate 			 * worth at once here when the opportunity
8907c478bd9Sstevel@tonic-gate 			 * arises, as that has been shown to improve
8917c478bd9Sstevel@tonic-gate 			 * our wall time.  However, to do that
8927c478bd9Sstevel@tonic-gate 			 * requires that we use the pageio subsystem,
8937c478bd9Sstevel@tonic-gate 			 * which doesn't mix well with what we're
8947c478bd9Sstevel@tonic-gate 			 * already using here.  We can't use pageio
8957c478bd9Sstevel@tonic-gate 			 * all the time, because that subsystem
8967c478bd9Sstevel@tonic-gate 			 * assumes that a page is stored in N
8977c478bd9Sstevel@tonic-gate 			 * contiguous blocks on the device.
8987c478bd9Sstevel@tonic-gate 			 * Interleaving violates that assumption.
8997c478bd9Sstevel@tonic-gate 			 */
9007c478bd9Sstevel@tonic-gate 
9017c478bd9Sstevel@tonic-gate 			remainder = PAGESIZE - (byte_offset % PAGESIZE);
9027c478bd9Sstevel@tonic-gate 			if (bufs[count].b_bcount > remainder) {
9037c478bd9Sstevel@tonic-gate 				bufs[count].b_bcount = remainder;
9047c478bd9Sstevel@tonic-gate 			}
9057c478bd9Sstevel@tonic-gate 
9067c478bd9Sstevel@tonic-gate 			bufs[count].b_bufsize = bufs[count].b_bcount;
9079cbc422eSpeterte 			if (((offset_t)byte_offset + bufs[count].b_bcount) >
9089cbc422eSpeterte 				HS_MAXFILEOFF) {
9099cbc422eSpeterte 				break;
9109cbc422eSpeterte 			}
9117c478bd9Sstevel@tonic-gate 			byte_offset += bufs[count].b_bcount;
9127c478bd9Sstevel@tonic-gate 
9137c478bd9Sstevel@tonic-gate 			(void) bdev_strategy(&bufs[count]);
9147c478bd9Sstevel@tonic-gate 
9157c478bd9Sstevel@tonic-gate 			lwp_stat_update(LWP_STAT_INBLK, 1);
9167c478bd9Sstevel@tonic-gate 			lastp = searchp;
9177c478bd9Sstevel@tonic-gate 			if ((remainder - bufs[count].b_bcount) < 1) {
9187c478bd9Sstevel@tonic-gate 				searchp = searchp->p_next;
9197c478bd9Sstevel@tonic-gate 			}
9207c478bd9Sstevel@tonic-gate 		}
9217c478bd9Sstevel@tonic-gate 
9227c478bd9Sstevel@tonic-gate 		bufsused = count;
9237c478bd9Sstevel@tonic-gate 		/* Now wait for everything to come in */
9247c478bd9Sstevel@tonic-gate 		for (count = 0; count < bufsused; count++) {
9257c478bd9Sstevel@tonic-gate 			if (err == 0) {
9267c478bd9Sstevel@tonic-gate 				err = biowait(&bufs[count]);
9277c478bd9Sstevel@tonic-gate 			} else
9287c478bd9Sstevel@tonic-gate 				(void) biowait(&bufs[count]);
9297c478bd9Sstevel@tonic-gate 		}
9307c478bd9Sstevel@tonic-gate 
9317c478bd9Sstevel@tonic-gate 		/* Don't leak resources */
9327c478bd9Sstevel@tonic-gate 		for (count = 0; count < bufcnt; count++) {
9337c478bd9Sstevel@tonic-gate 			sema_destroy(&bufs[count].b_io);
9347c478bd9Sstevel@tonic-gate 			sema_destroy(&bufs[count].b_sem);
9357c478bd9Sstevel@tonic-gate 			if (count < bufsused && vas[count] != NULL) {
9367c478bd9Sstevel@tonic-gate 				ppmapout(vas[count]);
9377c478bd9Sstevel@tonic-gate 			}
9387c478bd9Sstevel@tonic-gate 		}
9397c478bd9Sstevel@tonic-gate 
9407c478bd9Sstevel@tonic-gate 		kmem_free(vas, bufcnt * sizeof (caddr_t));
9417c478bd9Sstevel@tonic-gate 		kmem_free(bufs, bufcnt * sizeof (struct buf));
9427c478bd9Sstevel@tonic-gate 	}
9437c478bd9Sstevel@tonic-gate 
9447c478bd9Sstevel@tonic-gate 	if (err) {
9457c478bd9Sstevel@tonic-gate 		pvn_read_done(pp, B_ERROR);
9467c478bd9Sstevel@tonic-gate 		return (err);
9477c478bd9Sstevel@tonic-gate 	}
9487c478bd9Sstevel@tonic-gate 
9497c478bd9Sstevel@tonic-gate 	/*
9507c478bd9Sstevel@tonic-gate 	 * Lock the requested page, and the one after it if possible.
9517c478bd9Sstevel@tonic-gate 	 * Don't bother if our caller hasn't given us a place to stash
9527c478bd9Sstevel@tonic-gate 	 * the page pointers, since otherwise we'd lock pages that would
9537c478bd9Sstevel@tonic-gate 	 * never get unlocked.
9547c478bd9Sstevel@tonic-gate 	 */
9557c478bd9Sstevel@tonic-gate 	if (pagefound) {
9567c478bd9Sstevel@tonic-gate 		int index;
9577c478bd9Sstevel@tonic-gate 		ulong_t soff;
9587c478bd9Sstevel@tonic-gate 
9597c478bd9Sstevel@tonic-gate 		/*
9607c478bd9Sstevel@tonic-gate 		 * Make sure it's in memory before we say it's here.
9617c478bd9Sstevel@tonic-gate 		 */
9627c478bd9Sstevel@tonic-gate 		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
9637c478bd9Sstevel@tonic-gate 			hsfs_lostpage++;
9647c478bd9Sstevel@tonic-gate 			goto reread;
9657c478bd9Sstevel@tonic-gate 		}
9667c478bd9Sstevel@tonic-gate 
9677c478bd9Sstevel@tonic-gate 		pl[0] = pp;
9687c478bd9Sstevel@tonic-gate 		index = 1;
9697c478bd9Sstevel@tonic-gate 
9707c478bd9Sstevel@tonic-gate 		/*
9717c478bd9Sstevel@tonic-gate 		 * Try to lock the next page, if it exists, without
9727c478bd9Sstevel@tonic-gate 		 * blocking.
9737c478bd9Sstevel@tonic-gate 		 */
9747c478bd9Sstevel@tonic-gate 		plsz -= PAGESIZE;
9757c478bd9Sstevel@tonic-gate 		/* LINTED (plsz is unsigned) */
9767c478bd9Sstevel@tonic-gate 		for (soff = off + PAGESIZE; plsz > 0;
9777c478bd9Sstevel@tonic-gate 		    soff += PAGESIZE, plsz -= PAGESIZE) {
9787c478bd9Sstevel@tonic-gate 			pp = page_lookup_nowait(vp, (u_offset_t)soff,
9797c478bd9Sstevel@tonic-gate 					SE_SHARED);
9807c478bd9Sstevel@tonic-gate 			if (pp == NULL)
9817c478bd9Sstevel@tonic-gate 				break;
9827c478bd9Sstevel@tonic-gate 			pl[index++] = pp;
9837c478bd9Sstevel@tonic-gate 		}
9847c478bd9Sstevel@tonic-gate 		pl[index] = NULL;
9857c478bd9Sstevel@tonic-gate 		return (0);
9867c478bd9Sstevel@tonic-gate 	}
9877c478bd9Sstevel@tonic-gate 
9887c478bd9Sstevel@tonic-gate 	if (pp != NULL) {
9897c478bd9Sstevel@tonic-gate 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
9907c478bd9Sstevel@tonic-gate 	}
9917c478bd9Sstevel@tonic-gate 
9927c478bd9Sstevel@tonic-gate 	return (err);
9937c478bd9Sstevel@tonic-gate }
9947c478bd9Sstevel@tonic-gate 
9957c478bd9Sstevel@tonic-gate static int
9967c478bd9Sstevel@tonic-gate hsfs_getpage(
9977c478bd9Sstevel@tonic-gate 	struct vnode *vp,
9987c478bd9Sstevel@tonic-gate 	offset_t off,
9997c478bd9Sstevel@tonic-gate 	size_t len,
10007c478bd9Sstevel@tonic-gate 	uint_t *protp,
10017c478bd9Sstevel@tonic-gate 	struct page *pl[],
10027c478bd9Sstevel@tonic-gate 	size_t plsz,
10037c478bd9Sstevel@tonic-gate 	struct seg *seg,
10047c478bd9Sstevel@tonic-gate 	caddr_t addr,
10057c478bd9Sstevel@tonic-gate 	enum seg_rw rw,
10067c478bd9Sstevel@tonic-gate 	struct cred *cred)
10077c478bd9Sstevel@tonic-gate {
10087c478bd9Sstevel@tonic-gate 	int err;
10097c478bd9Sstevel@tonic-gate 	uint_t filsiz;
10107c478bd9Sstevel@tonic-gate 	struct hsnode *hp = VTOH(vp);
10117c478bd9Sstevel@tonic-gate 
10127c478bd9Sstevel@tonic-gate 	/* does not support write */
10137c478bd9Sstevel@tonic-gate 	if (rw == S_WRITE) {
10147c478bd9Sstevel@tonic-gate 		panic("write attempt on READ ONLY HSFS");
10157c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
10167c478bd9Sstevel@tonic-gate 	}
10177c478bd9Sstevel@tonic-gate 
10187c478bd9Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP) {
10197c478bd9Sstevel@tonic-gate 		return (ENOSYS);
10207c478bd9Sstevel@tonic-gate 	}
10217c478bd9Sstevel@tonic-gate 
10229cbc422eSpeterte 	ASSERT(off <= HS_MAXFILEOFF);
10237c478bd9Sstevel@tonic-gate 
10247c478bd9Sstevel@tonic-gate 	/*
10257c478bd9Sstevel@tonic-gate 	 * Determine file data size for EOF check.
10267c478bd9Sstevel@tonic-gate 	 */
10277c478bd9Sstevel@tonic-gate 	filsiz = hp->hs_dirent.ext_size;
10287c478bd9Sstevel@tonic-gate 	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
10297c478bd9Sstevel@tonic-gate 		return (EFAULT);	/* beyond EOF */
10307c478bd9Sstevel@tonic-gate 
10317c478bd9Sstevel@tonic-gate 	if (protp != NULL)
10327c478bd9Sstevel@tonic-gate 		*protp = PROT_ALL;
10337c478bd9Sstevel@tonic-gate 
10347c478bd9Sstevel@tonic-gate 	if (len <= PAGESIZE)
10357c478bd9Sstevel@tonic-gate 		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
10367c478bd9Sstevel@tonic-gate 		    seg, addr, rw, cred);
10377c478bd9Sstevel@tonic-gate 	else
10387c478bd9Sstevel@tonic-gate 		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
10397c478bd9Sstevel@tonic-gate 		    pl, plsz, seg, addr, rw, cred);
10407c478bd9Sstevel@tonic-gate 
10417c478bd9Sstevel@tonic-gate 	return (err);
10427c478bd9Sstevel@tonic-gate }
10437c478bd9Sstevel@tonic-gate 
10447c478bd9Sstevel@tonic-gate 
10457c478bd9Sstevel@tonic-gate 
10467c478bd9Sstevel@tonic-gate /*
10477c478bd9Sstevel@tonic-gate  * This function should never be called; it exists only so that it
10487c478bd9Sstevel@tonic-gate  * can be passed as an argument to other functions.
10497c478bd9Sstevel@tonic-gate  */
10507c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10517c478bd9Sstevel@tonic-gate int
10527c478bd9Sstevel@tonic-gate hsfs_putapage(
10537c478bd9Sstevel@tonic-gate 	vnode_t		*vp,
10547c478bd9Sstevel@tonic-gate 	page_t		*pp,
10557c478bd9Sstevel@tonic-gate 	u_offset_t	*offp,
10567c478bd9Sstevel@tonic-gate 	size_t		*lenp,
10577c478bd9Sstevel@tonic-gate 	int		flags,
10587c478bd9Sstevel@tonic-gate 	cred_t		*cr)
10597c478bd9Sstevel@tonic-gate {
10607c478bd9Sstevel@tonic-gate 	/* should never happen - just destroy it */
10617c478bd9Sstevel@tonic-gate 	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
10627c478bd9Sstevel@tonic-gate 	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
10637c478bd9Sstevel@tonic-gate 	return (0);
10647c478bd9Sstevel@tonic-gate }
10657c478bd9Sstevel@tonic-gate 
10667c478bd9Sstevel@tonic-gate 
10677c478bd9Sstevel@tonic-gate /*
10687c478bd9Sstevel@tonic-gate  * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
10697c478bd9Sstevel@tonic-gate  * B_INVAL is set by:
10707c478bd9Sstevel@tonic-gate  *
10717c478bd9Sstevel@tonic-gate  *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
10727c478bd9Sstevel@tonic-gate  *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
10737c478bd9Sstevel@tonic-gate  *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
10747c478bd9Sstevel@tonic-gate  *
10757c478bd9Sstevel@tonic-gate  * The B_FREE (as well as the B_DONTNEED) flag is set when the
10767c478bd9Sstevel@tonic-gate  * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
10777c478bd9Sstevel@tonic-gate  * from SEGVN to release pages behind a pagefault.
10787c478bd9Sstevel@tonic-gate  */
10797c478bd9Sstevel@tonic-gate /*ARGSUSED*/
10807c478bd9Sstevel@tonic-gate static int
10817c478bd9Sstevel@tonic-gate hsfs_putpage(
10827c478bd9Sstevel@tonic-gate 	struct vnode	*vp,
10837c478bd9Sstevel@tonic-gate 	offset_t	off,
10847c478bd9Sstevel@tonic-gate 	size_t		len,
10857c478bd9Sstevel@tonic-gate 	int		flags,
10867c478bd9Sstevel@tonic-gate 	struct cred	*cr)
10877c478bd9Sstevel@tonic-gate {
10887c478bd9Sstevel@tonic-gate 	int error = 0;
10897c478bd9Sstevel@tonic-gate 
10907c478bd9Sstevel@tonic-gate 	if (vp->v_count == 0) {
10917c478bd9Sstevel@tonic-gate 		panic("hsfs_putpage: bad v_count");
10927c478bd9Sstevel@tonic-gate 		/*NOTREACHED*/
10937c478bd9Sstevel@tonic-gate 	}
10947c478bd9Sstevel@tonic-gate 
10957c478bd9Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP)
10967c478bd9Sstevel@tonic-gate 		return (ENOSYS);
10977c478bd9Sstevel@tonic-gate 
10989cbc422eSpeterte 	ASSERT(off <= HS_MAXFILEOFF);
10997c478bd9Sstevel@tonic-gate 
11007c478bd9Sstevel@tonic-gate 	if (!vn_has_cached_data(vp))	/* no cached pages */
11017c478bd9Sstevel@tonic-gate 		return (0);
11027c478bd9Sstevel@tonic-gate 
11037c478bd9Sstevel@tonic-gate 	if (len == 0)		/* from 'off' to EOF */
11047c478bd9Sstevel@tonic-gate 		error = pvn_vplist_dirty(vp, off,
11057c478bd9Sstevel@tonic-gate 		    hsfs_putapage, flags, cr);
11067c478bd9Sstevel@tonic-gate 	else {
11077c478bd9Sstevel@tonic-gate 		offset_t end_off = off + len;
11087c478bd9Sstevel@tonic-gate 		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
11097c478bd9Sstevel@tonic-gate 		offset_t io_off;
11107c478bd9Sstevel@tonic-gate 
11117c478bd9Sstevel@tonic-gate 		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
11127c478bd9Sstevel@tonic-gate 		if (end_off > file_size)
11137c478bd9Sstevel@tonic-gate 			end_off = file_size;
11147c478bd9Sstevel@tonic-gate 
11157c478bd9Sstevel@tonic-gate 		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
11167c478bd9Sstevel@tonic-gate 			page_t *pp;
11177c478bd9Sstevel@tonic-gate 
11187c478bd9Sstevel@tonic-gate 			/*
11197c478bd9Sstevel@tonic-gate 			 * We insist on getting the page only if we are
11207c478bd9Sstevel@tonic-gate 			 * about to invalidate, free or write it and
11217c478bd9Sstevel@tonic-gate 			 * the B_ASYNC flag is not set.
11227c478bd9Sstevel@tonic-gate 			 */
11237c478bd9Sstevel@tonic-gate 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
11247c478bd9Sstevel@tonic-gate 				pp = page_lookup(vp, io_off,
11257c478bd9Sstevel@tonic-gate 				    (flags & (B_INVAL | B_FREE)) ?
11267c478bd9Sstevel@tonic-gate 				    SE_EXCL : SE_SHARED);
11277c478bd9Sstevel@tonic-gate 			} else {
11287c478bd9Sstevel@tonic-gate 				pp = page_lookup_nowait(vp, io_off,
11297c478bd9Sstevel@tonic-gate 				    (flags & B_FREE) ? SE_EXCL : SE_SHARED);
11307c478bd9Sstevel@tonic-gate 			}
11317c478bd9Sstevel@tonic-gate 
11327c478bd9Sstevel@tonic-gate 			if (pp == NULL)
11337c478bd9Sstevel@tonic-gate 				continue;
11347c478bd9Sstevel@tonic-gate 			/*
11357c478bd9Sstevel@tonic-gate 			 * Normally pvn_getdirty() should return 0, which
11367c478bd9Sstevel@tonic-gate 			 * implies that it has done the job for us.
11377c478bd9Sstevel@tonic-gate 			 * The shouldn't-happen scenario is when it returns 1.
11387c478bd9Sstevel@tonic-gate 			 * This means that the page has been modified and
11397c478bd9Sstevel@tonic-gate 			 * needs to be put back.
11407c478bd9Sstevel@tonic-gate 			 * Since we can't write on a CD, we fake a failed
11417c478bd9Sstevel@tonic-gate 			 * I/O and force pvn_write_done() to destroy the page.
11427c478bd9Sstevel@tonic-gate 			 */
11437c478bd9Sstevel@tonic-gate 			if (pvn_getdirty(pp, flags) == 1) {
11447c478bd9Sstevel@tonic-gate 				cmn_err(CE_NOTE,
11457c478bd9Sstevel@tonic-gate 				    "hsfs_putpage: dirty HSFS page");
11467c478bd9Sstevel@tonic-gate 				pvn_write_done(pp, flags |
11477c478bd9Sstevel@tonic-gate 				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
11487c478bd9Sstevel@tonic-gate 			}
11497c478bd9Sstevel@tonic-gate 		}
11507c478bd9Sstevel@tonic-gate 	}
11517c478bd9Sstevel@tonic-gate 	return (error);
11527c478bd9Sstevel@tonic-gate }
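/*
 * Illustrative userland sketch, not part of the original source (kept out
 * of the build with #if 0): msync(3C) with MS_INVALIDATE, or madvise(3C)
 * with MADV_DONTNEED, on a mapping of an HSFS file ends up in
 * hsfs_putpage() above with B_INVAL set.  Nothing can legitimately be
 * dirty on a read-only medium, so the cached pages are simply dropped.
 * "/cdrom/example.txt" is a hypothetical path.
 */
#if 0
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct stat st;
	volatile char c;
	char *p;
	int fd;

	if ((fd = open("/cdrom/example.txt", O_RDONLY)) < 0)
		return (1);
	if (fstat(fd, &st) < 0)
		return (1);

	p = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);

	c = p[0];	/* fault a page in so something is cached */

	/* Drop the cached pages for the whole mapping. */
	if (msync(p, st.st_size, MS_INVALIDATE) < 0)
		return (1);

	/* MADV_DONTNEED is another route into VOP_PUTPAGE with B_INVAL. */
	(void) madvise(p, st.st_size, MADV_DONTNEED);

	(void) munmap(p, st.st_size);
	(void) close(fd);
	return (0);
}
#endif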
11537c478bd9Sstevel@tonic-gate 
11547c478bd9Sstevel@tonic-gate 
11557c478bd9Sstevel@tonic-gate /*ARGSUSED*/
11567c478bd9Sstevel@tonic-gate static int
11577c478bd9Sstevel@tonic-gate hsfs_map(
11587c478bd9Sstevel@tonic-gate 	struct vnode *vp,
11597c478bd9Sstevel@tonic-gate 	offset_t off,
11607c478bd9Sstevel@tonic-gate 	struct as *as,
11617c478bd9Sstevel@tonic-gate 	caddr_t *addrp,
11627c478bd9Sstevel@tonic-gate 	size_t len,
11637c478bd9Sstevel@tonic-gate 	uchar_t prot,
11647c478bd9Sstevel@tonic-gate 	uchar_t maxprot,
11657c478bd9Sstevel@tonic-gate 	uint_t flags,
11667c478bd9Sstevel@tonic-gate 	struct cred *cred)
11677c478bd9Sstevel@tonic-gate {
11687c478bd9Sstevel@tonic-gate 	struct segvn_crargs vn_a;
11697c478bd9Sstevel@tonic-gate 	int error;
11707c478bd9Sstevel@tonic-gate 
11717c478bd9Sstevel@tonic-gate 	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
11727c478bd9Sstevel@tonic-gate 
11737c478bd9Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP)
11747c478bd9Sstevel@tonic-gate 		return (ENOSYS);
11757c478bd9Sstevel@tonic-gate 
11769cbc422eSpeterte 	if (off > HS_MAXFILEOFF || off < 0 ||
11779cbc422eSpeterte 	    (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1178cfa55013Speterte 		return (ENXIO);
11797c478bd9Sstevel@tonic-gate 
11807c478bd9Sstevel@tonic-gate 	if (vp->v_type != VREG) {
11817c478bd9Sstevel@tonic-gate 		return (ENODEV);
11827c478bd9Sstevel@tonic-gate 	}
11837c478bd9Sstevel@tonic-gate 
11847c478bd9Sstevel@tonic-gate 	/*
11857c478bd9Sstevel@tonic-gate 	 * If the file is subject to mandatory locking, disallow mapping.
11867c478bd9Sstevel@tonic-gate 	 */
11877c478bd9Sstevel@tonic-gate 	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
11887c478bd9Sstevel@tonic-gate 		return (EAGAIN);
11897c478bd9Sstevel@tonic-gate 
11907c478bd9Sstevel@tonic-gate 	as_rangelock(as);
11917c478bd9Sstevel@tonic-gate 
11927c478bd9Sstevel@tonic-gate 	if ((flags & MAP_FIXED) == 0) {
11937c478bd9Sstevel@tonic-gate 		map_addr(addrp, len, off, 1, flags);
11947c478bd9Sstevel@tonic-gate 		if (*addrp == NULL) {
11957c478bd9Sstevel@tonic-gate 			as_rangeunlock(as);
11967c478bd9Sstevel@tonic-gate 			return (ENOMEM);
11977c478bd9Sstevel@tonic-gate 		}
11987c478bd9Sstevel@tonic-gate 	} else {
11997c478bd9Sstevel@tonic-gate 		/*
12007c478bd9Sstevel@tonic-gate 		 * User specified address - blow away any previous mappings
12017c478bd9Sstevel@tonic-gate 		 */
12027c478bd9Sstevel@tonic-gate 		(void) as_unmap(as, *addrp, len);
12037c478bd9Sstevel@tonic-gate 	}
12047c478bd9Sstevel@tonic-gate 
12057c478bd9Sstevel@tonic-gate 	vn_a.vp = vp;
12067c478bd9Sstevel@tonic-gate 	vn_a.offset = off;
12077c478bd9Sstevel@tonic-gate 	vn_a.type = flags & MAP_TYPE;
12087c478bd9Sstevel@tonic-gate 	vn_a.prot = prot;
12097c478bd9Sstevel@tonic-gate 	vn_a.maxprot = maxprot;
12107c478bd9Sstevel@tonic-gate 	vn_a.flags = flags & ~MAP_TYPE;
12117c478bd9Sstevel@tonic-gate 	vn_a.cred = cred;
12127c478bd9Sstevel@tonic-gate 	vn_a.amp = NULL;
12137c478bd9Sstevel@tonic-gate 	vn_a.szc = 0;
12147c478bd9Sstevel@tonic-gate 	vn_a.lgrp_mem_policy_flags = 0;
12157c478bd9Sstevel@tonic-gate 
12167c478bd9Sstevel@tonic-gate 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
12177c478bd9Sstevel@tonic-gate 	as_rangeunlock(as);
12187c478bd9Sstevel@tonic-gate 	return (error);
12197c478bd9Sstevel@tonic-gate }
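/*
 * Illustrative userland sketch, not part of the original source (kept out
 * of the build with #if 0): mmap(2) on an HSFS vnode arrives at hsfs_map()
 * above.  Only regular files may be mapped, so mapping a directory fails
 * with ENODEV, and an offset outside the supported range fails with ENXIO.
 * "/cdrom" is a hypothetical HSFS mount point.
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	void *p;
	int dfd;

	if ((dfd = open("/cdrom", O_RDONLY)) < 0)
		return (1);

	p = mmap(NULL, 8192, PROT_READ, MAP_SHARED, dfd, 0);
	if (p == MAP_FAILED)
		(void) printf("directory mmap: errno %d (expected ENODEV)\n",
		    errno);
	else
		(void) munmap(p, 8192);

	(void) close(dfd);
	return (0);
}
#endif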
12207c478bd9Sstevel@tonic-gate 
12217c478bd9Sstevel@tonic-gate /* ARGSUSED */
12227c478bd9Sstevel@tonic-gate static int
12237c478bd9Sstevel@tonic-gate hsfs_addmap(
12247c478bd9Sstevel@tonic-gate 	struct vnode *vp,
12257c478bd9Sstevel@tonic-gate 	offset_t off,
12267c478bd9Sstevel@tonic-gate 	struct as *as,
12277c478bd9Sstevel@tonic-gate 	caddr_t addr,
12287c478bd9Sstevel@tonic-gate 	size_t len,
12297c478bd9Sstevel@tonic-gate 	uchar_t prot,
12307c478bd9Sstevel@tonic-gate 	uchar_t maxprot,
12317c478bd9Sstevel@tonic-gate 	uint_t flags,
12327c478bd9Sstevel@tonic-gate 	struct cred *cr)
12337c478bd9Sstevel@tonic-gate {
12347c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
12357c478bd9Sstevel@tonic-gate 
12367c478bd9Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP)
12377c478bd9Sstevel@tonic-gate 		return (ENOSYS);
12387c478bd9Sstevel@tonic-gate 
12397c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
12407c478bd9Sstevel@tonic-gate 	mutex_enter(&hp->hs_contents_lock);
12417c478bd9Sstevel@tonic-gate 	hp->hs_mapcnt += btopr(len);
12427c478bd9Sstevel@tonic-gate 	mutex_exit(&hp->hs_contents_lock);
12437c478bd9Sstevel@tonic-gate 	return (0);
12447c478bd9Sstevel@tonic-gate }
12457c478bd9Sstevel@tonic-gate 
12467c478bd9Sstevel@tonic-gate /*ARGSUSED*/
12477c478bd9Sstevel@tonic-gate static int
12487c478bd9Sstevel@tonic-gate hsfs_delmap(
12497c478bd9Sstevel@tonic-gate 	struct vnode *vp,
12507c478bd9Sstevel@tonic-gate 	offset_t off,
12517c478bd9Sstevel@tonic-gate 	struct as *as,
12527c478bd9Sstevel@tonic-gate 	caddr_t addr,
12537c478bd9Sstevel@tonic-gate 	size_t len,
12547c478bd9Sstevel@tonic-gate 	uint_t prot,
12557c478bd9Sstevel@tonic-gate 	uint_t maxprot,
12567c478bd9Sstevel@tonic-gate 	uint_t flags,
12577c478bd9Sstevel@tonic-gate 	struct cred *cr)
12587c478bd9Sstevel@tonic-gate {
12597c478bd9Sstevel@tonic-gate 	struct hsnode *hp;
12607c478bd9Sstevel@tonic-gate 
12617c478bd9Sstevel@tonic-gate 	if (vp->v_flag & VNOMAP)
12627c478bd9Sstevel@tonic-gate 		return (ENOSYS);
12637c478bd9Sstevel@tonic-gate 
12647c478bd9Sstevel@tonic-gate 	hp = VTOH(vp);
12657c478bd9Sstevel@tonic-gate 	mutex_enter(&hp->hs_contents_lock);
12667c478bd9Sstevel@tonic-gate 	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
12677c478bd9Sstevel@tonic-gate 	ASSERT(hp->hs_mapcnt >= 0);
12687c478bd9Sstevel@tonic-gate 	mutex_exit(&hp->hs_contents_lock);
12697c478bd9Sstevel@tonic-gate 	return (0);
12707c478bd9Sstevel@tonic-gate }
12717c478bd9Sstevel@tonic-gate 
12727c478bd9Sstevel@tonic-gate /* ARGSUSED */
12737c478bd9Sstevel@tonic-gate static int
12747c478bd9Sstevel@tonic-gate hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
12757c478bd9Sstevel@tonic-gate {
12767c478bd9Sstevel@tonic-gate 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
12777c478bd9Sstevel@tonic-gate }
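/*
 * Illustrative userland sketch, not part of the original source (kept out
 * of the build with #if 0): VOP_SEEK only validates the resulting offset,
 * so lseek(2) to a negative offset on an HSFS file fails with EINVAL while
 * seeking beyond end-of-file succeeds (later reads just return 0).
 * "/cdrom/example.txt" is a hypothetical path.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int fd;

	if ((fd = open("/cdrom/example.txt", O_RDONLY)) < 0)
		return (1);

	if (lseek(fd, (off_t)-1, SEEK_SET) == (off_t)-1)
		(void) printf("negative seek: errno %d (expected EINVAL)\n",
		    errno);

	/* Seeking past EOF is permitted; reads there simply return 0. */
	(void) lseek(fd, 1024 * 1024, SEEK_END);

	(void) close(fd);
	return (0);
}
#endif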
12787c478bd9Sstevel@tonic-gate 
12797c478bd9Sstevel@tonic-gate /* ARGSUSED */
12807c478bd9Sstevel@tonic-gate static int
12817c478bd9Sstevel@tonic-gate hsfs_frlock(
12827c478bd9Sstevel@tonic-gate 	struct vnode *vp,
12837c478bd9Sstevel@tonic-gate 	int cmd,
12847c478bd9Sstevel@tonic-gate 	struct flock64 *bfp,
12857c478bd9Sstevel@tonic-gate 	int flag,
12867c478bd9Sstevel@tonic-gate 	offset_t offset,
12877c478bd9Sstevel@tonic-gate 	struct flk_callback *flk_cbp,
12887c478bd9Sstevel@tonic-gate 	cred_t *cr)
12897c478bd9Sstevel@tonic-gate {
12907c478bd9Sstevel@tonic-gate 	struct hsnode *hp = VTOH(vp);
12917c478bd9Sstevel@tonic-gate 
12927c478bd9Sstevel@tonic-gate 	/*
12937c478bd9Sstevel@tonic-gate 	 * If the file is being mapped, disallow fs_frlock.
12947c478bd9Sstevel@tonic-gate 	 * We are not holding the hs_contents_lock while checking
12957c478bd9Sstevel@tonic-gate 	 * hs_mapcnt because the current locking strategy drops all
12967c478bd9Sstevel@tonic-gate 	 * locks before calling fs_frlock.
12977c478bd9Sstevel@tonic-gate 	 * So, hs_mapcnt could change before we enter fs_frlock making
12987c478bd9Sstevel@tonic-gate 	 * it meaningless to have held hs_contents_lock in the first place.
12997c478bd9Sstevel@tonic-gate 	 */
13007c478bd9Sstevel@tonic-gate 	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
13017c478bd9Sstevel@tonic-gate 		return (EAGAIN);
13027c478bd9Sstevel@tonic-gate 
13037c478bd9Sstevel@tonic-gate 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
13047c478bd9Sstevel@tonic-gate }
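/*
 * Illustrative userland sketch, not part of the original source (kept out
 * of the build with #if 0): fcntl(2) record locks on an HSFS file are
 * normally handled by the generic fs_frlock().  hsfs_frlock() above only
 * steps in to return EAGAIN when the file is currently mapped and its mode
 * calls for mandatory locking.  "/cdrom/example.txt" is a hypothetical
 * path.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <string.h>

int
main(void)
{
	struct flock fl;
	int fd;

	if ((fd = open("/cdrom/example.txt", O_RDONLY)) < 0)
		return (1);

	(void) memset(&fl, 0, sizeof (fl));
	fl.l_type = F_RDLCK;		/* advisory read lock, whole file */
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;

	if (fcntl(fd, F_SETLK, &fl) < 0)
		return (1);

	fl.l_type = F_UNLCK;
	(void) fcntl(fd, F_SETLK, &fl);
	(void) close(fd);
	return (0);
}
#endif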
13057c478bd9Sstevel@tonic-gate 
1306fc1c62b8Sfrankho /* ARGSUSED */
1307fc1c62b8Sfrankho static int
1308fc1c62b8Sfrankho hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr)
1309fc1c62b8Sfrankho {
1310fc1c62b8Sfrankho 	struct hsfs	*fsp;
1311fc1c62b8Sfrankho 
1312fc1c62b8Sfrankho 	int		error = 0;
1313fc1c62b8Sfrankho 
1314fc1c62b8Sfrankho 	switch (cmd) {
1315fc1c62b8Sfrankho 
1316fc1c62b8Sfrankho 	case _PC_NAME_MAX:
1317fc1c62b8Sfrankho 		fsp = VFS_TO_HSFS(vp->v_vfsp);
1318fc1c62b8Sfrankho 		*valp = fsp->hsfs_namemax;
1319fc1c62b8Sfrankho 		break;
1320fc1c62b8Sfrankho 
1321fc1c62b8Sfrankho 	case _PC_FILESIZEBITS:
1322fc1c62b8Sfrankho 		*valp = 33;	/* Without multi extent support: 4 GB - 2k */
1323fc1c62b8Sfrankho 		break;
1324fc1c62b8Sfrankho 
1325fc1c62b8Sfrankho 	default:
1326fc1c62b8Sfrankho 		error = fs_pathconf(vp, cmd, valp, cr);
1327fc1c62b8Sfrankho 	}
1328fc1c62b8Sfrankho 
1329fc1c62b8Sfrankho 	return (error);
1330fc1c62b8Sfrankho }
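/*
 * Illustrative userland sketch, not part of the original source (kept out
 * of the build with #if 0): pathconf(2) queries land in hsfs_pathconf()
 * above.  _PC_NAME_MAX reflects the per-mount name limit and
 * _PC_FILESIZEBITS reports 33, i.e. files of up to 4 GB - 2 KB without
 * multi-extent support.  "/cdrom" is a hypothetical HSFS mount point.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	long namemax = pathconf("/cdrom", _PC_NAME_MAX);
	long sizebits = pathconf("/cdrom", _PC_FILESIZEBITS);

	(void) printf("NAME_MAX = %ld, FILESIZEBITS = %ld\n",
	    namemax, sizebits);
	return (0);
}
#endif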
1331fc1c62b8Sfrankho 
1332fc1c62b8Sfrankho 
1333fc1c62b8Sfrankho 
13347c478bd9Sstevel@tonic-gate const fs_operation_def_t hsfs_vnodeops_template[] = {
1335*aa59c4cbSrsb 	VOPNAME_OPEN,		{ .vop_open = hsfs_open },
1336*aa59c4cbSrsb 	VOPNAME_CLOSE,		{ .vop_close = hsfs_close },
1337*aa59c4cbSrsb 	VOPNAME_READ,		{ .vop_read = hsfs_read },
1338*aa59c4cbSrsb 	VOPNAME_GETATTR,	{ .vop_getattr = hsfs_getattr },
1339*aa59c4cbSrsb 	VOPNAME_ACCESS,		{ .vop_access = hsfs_access },
1340*aa59c4cbSrsb 	VOPNAME_LOOKUP,		{ .vop_lookup = hsfs_lookup },
1341*aa59c4cbSrsb 	VOPNAME_READDIR,	{ .vop_readdir = hsfs_readdir },
1342*aa59c4cbSrsb 	VOPNAME_READLINK,	{ .vop_readlink = hsfs_readlink },
1343*aa59c4cbSrsb 	VOPNAME_FSYNC,		{ .vop_fsync = hsfs_fsync },
1344*aa59c4cbSrsb 	VOPNAME_INACTIVE,	{ .vop_inactive = hsfs_inactive },
1345*aa59c4cbSrsb 	VOPNAME_FID,		{ .vop_fid = hsfs_fid },
1346*aa59c4cbSrsb 	VOPNAME_SEEK,		{ .vop_seek = hsfs_seek },
1347*aa59c4cbSrsb 	VOPNAME_FRLOCK,		{ .vop_frlock = hsfs_frlock },
1348*aa59c4cbSrsb 	VOPNAME_GETPAGE,	{ .vop_getpage = hsfs_getpage },
1349*aa59c4cbSrsb 	VOPNAME_PUTPAGE,	{ .vop_putpage = hsfs_putpage },
1350*aa59c4cbSrsb 	VOPNAME_MAP,		{ .vop_map = hsfs_map },
1351*aa59c4cbSrsb 	VOPNAME_ADDMAP,		{ .vop_addmap = hsfs_addmap },
1352*aa59c4cbSrsb 	VOPNAME_DELMAP,		{ .vop_delmap = hsfs_delmap },
1353*aa59c4cbSrsb 	VOPNAME_PATHCONF,	{ .vop_pathconf = hsfs_pathconf },
1354*aa59c4cbSrsb 	NULL,			NULL
13557c478bd9Sstevel@tonic-gate };
13567c478bd9Sstevel@tonic-gate 
13577c478bd9Sstevel@tonic-gate struct vnodeops *hsfs_vnodeops;
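/*
 * The template above is not itself a vnodeops structure; it is compiled
 * into hsfs_vnodeops with vn_make_ops() when the filesystem module is
 * initialized.  A minimal sketch of that step follows (kept out of the
 * build with #if 0); the function name hsfs_create_vnodeops and the "hsfs"
 * name string are illustrative only, and the real call lives in the HSFS
 * VFS initialization code.
 */
#if 0
static int
hsfs_create_vnodeops(void)
{
	int error;

	/* Turn the fs_operation_def_t template into a vnodeops structure. */
	error = vn_make_ops("hsfs", hsfs_vnodeops_template, &hsfs_vnodeops);
	if (error != 0) {
		cmn_err(CE_WARN, "hsfs: bad vnode ops template");
		return (error);
	}
	return (0);
}
#endif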
1358