/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2017 by Delphix. All rights reserved.
 */

/*
 * Vnode operations for the High Sierra filesystem
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/fbuf.h>
#include <sys/dirent.h>
#include <sys/dkio.h>
#include <sys/atomic.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>
#include <sys/avl.h>
#include <sys/sunldi.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>

/*
 * For struct modlinkage
 */
#include <sys/modctl.h>

#include <sys/fs/hsfs_spec.h>
#include <sys/fs/hsfs_node.h>
#include <sys/fs/hsfs_impl.h>
#include <sys/fs/hsfs_susp.h>
#include <sys/fs/hsfs_rrip.h>

#include <fs/fs_subr.h>

/* # of contiguous requests to detect a sequential access pattern */
static int seq_contig_requests = 2;
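
/*
 * Sequential access is detected per hsnode: hs_prev_offset remembers
 * where the last read left off and hs_num_contig counts back-to-back
 * contiguous requests.  Once hs_num_contig reaches seq_contig_requests,
 * the getpage path grows hs_ra_bytes so later faults issue readaheads
 * (these fields are rolled back below when memory is tight).
 */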

/*
 * This is the maximum number of taskq threads that will be created
 * if required.  Since we are using a dynamic taskq, by default only
 * one thread is created initially.
 *
 * NOTE: In the usual hsfs use case this per-fs-instance number
 * of taskq threads should not place any undue load on a system.
 * Even on an unusual system with, say, 100 CDROM drives, 800 threads
 * will not be created unless all the drives are loaded and all
 * of them are saturated with I/O at the same time!  If there is
 * ever a complaint of system load due to such an unusual case, it
 * should be easy enough to change to one per-machine dynamic taskq
 * for all hsfs mounts, with an nthreads of, say, 32.
 */
static int hsfs_taskq_nthreads = 8;	/* # of taskq threads per fs */

/* Min count of adjacent bufs that will avoid buf coalescing */
static int hsched_coalesce_min = 2;

/*
 * Kmem caches for heavily used small allocations. Using these kmem
 * caches provides a factor of 3 reduction in system time and greatly
 * aids overall throughput, esp. on SPARC.
 */
struct kmem_cache *hio_cache;
struct kmem_cache *hio_info_cache;
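
/*
 * The caches are created once at initialization time, roughly as
 * below (a sketch; the cache names shown are illustrative):
 *
 *	hio_cache = kmem_cache_create("hsfs_hio_cache",
 *	    sizeof (struct hio), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
 *	    sizeof (struct hio_info), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *
 * Allocations then come from kmem_cache_alloc(hio_cache, KM_SLEEP)
 * and are returned with kmem_cache_free().
 */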

/*
 * This tunable allows us to ignore inode numbers from rrip-1.12.
 * In this case, we fall back to our default inode algorithm.
 */
extern int use_rrip_inodes;

static int hsched_deadline_compare(const void *x1, const void *x2);
static int hsched_offset_compare(const void *x1, const void *x2);
static void hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra);
int hsched_invoke_strategy(struct hsfs *fsp);

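/*
 * hsfs is a read-only filesystem, so there is never dirty data to
 * flush; fsync can simply succeed.
 */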
/* ARGSUSED */
static int
hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred, caller_context_t *ct)
{
	return (0);
}


/*ARGSUSED*/
static int
hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
    struct caller_context *ct)
{
	caddr_t base;
	offset_t diff;
	int error;
	struct hsnode *hp;
	uint_t filesize;

	hp = VTOH(vp);
	/*
	 * If vp is a directory, make sure its dirent is filled in with
	 * all of its info (it may be incomplete because of the ptbl).
	 */
	if (vp->v_type == VDIR) {
		if (hp->hs_dirent.ext_size == 0)
			hs_filldirent(vp, &hp->hs_dirent);
	}
	filesize = hp->hs_dirent.ext_size;

	/* Sanity checks. */
	if (uiop->uio_resid == 0 ||		/* No data wanted. */
	    uiop->uio_loffset > HS_MAXFILEOFF ||	/* Offset too big. */
	    uiop->uio_loffset >= filesize)	/* Past EOF. */
		return (0);

	do {
		/*
		 * We want to ask for only the "right" amount of data.
		 * In this case that means:
		 *
		 * We can't get data from beyond our EOF. If asked,
		 * we will give a short read.
		 *
		 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
		 * These buffers are always MAXBSIZE aligned.
		 * If our starting offset is not MAXBSIZE aligned,
		 * we can only ask for less than MAXBSIZE bytes.
		 *
		 * If our requested offset and length are such that
		 * they belong in different MAXBSIZE aligned slots
		 * then we'll be making more than one call on
		 * segmap_getmapflt.
		 *
		 * This diagram shows the variables we use and their
		 * relationships.
		 *
		 * |<-----MAXBSIZE----->|
		 * +--------------------------...+
		 * |.....mapon->|<--n-->|....*...|EOF
		 * +--------------------------...+
		 * uio_loffset->|
		 * uio_resid....|<---------->|
		 * diff.........|<-------------->|
		 *
		 * So, in this case our offset is not aligned
		 * and our request takes us outside of the
		 * MAXBSIZE window. We will break this up into
		 * two segmap_getmapflt calls.
		 */
		size_t nbytes;
		offset_t mapon;
		size_t n;
		uint_t flags;

		mapon = uiop->uio_loffset & MAXBOFFSET;
		diff = filesize - uiop->uio_loffset;
		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
		n = MIN(diff, nbytes);
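		/*
		 * Worked example (assuming MAXBSIZE is 8K): with
		 * uio_loffset = 10000, uio_resid = 20000 and
		 * filesize = 50000, mapon = 10000 & 8191 = 1808,
		 * diff = 40000 and nbytes = MIN(8192 - 1808, 20000) =
		 * 6384, so n = 6384: this pass reads to the end of the
		 * current 8K window and the loop continues from 16384.
		 */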
		if (n <= 0) {
			/* EOF or request satisfied. */
			return (0);
		}

		base = segmap_getmapflt(segkmap, vp,
		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);

		error = uiomove(base + mapon, n, UIO_READ, uiop);

		if (error == 0) {
			/*
			 * If we read a whole block or read to EOF, we
			 * won't need this buffer again soon.
			 */
			if (n + mapon == MAXBSIZE ||
			    uiop->uio_loffset == filesize)
				flags = SM_DONTNEED;
			else
				flags = 0;

			error = segmap_release(segkmap, base, flags);
		} else
			(void) segmap_release(segkmap, base, 0);
	} while (error == 0 && uiop->uio_resid > 0);

	return (error);
}

/*ARGSUSED2*/
static int
hsfs_getattr(struct vnode *vp, struct vattr *vap, int flags, struct cred *cred,
    caller_context_t *ct)
{
	struct hsnode *hp;
	struct vfs *vfsp;
	struct hsfs *fsp;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	vfsp = vp->v_vfsp;

	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
		hs_filldirent(vp, &hp->hs_dirent);
	}
	vap->va_type = IFTOVT(hp->hs_dirent.mode);
	vap->va_mode = hp->hs_dirent.mode;
	vap->va_uid = hp->hs_dirent.uid;
	vap->va_gid = hp->hs_dirent.gid;

	vap->va_fsid = vfsp->vfs_dev;
	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
	vap->va_nlink = hp->hs_dirent.nlink;
	vap->va_size = (offset_t)hp->hs_dirent.ext_size;

	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		vap->va_rdev = hp->hs_dirent.r_dev;
	else
		vap->va_rdev = 0;
	vap->va_blksize = vfsp->vfs_bsize;
	/* no. of blocks = no. of data blocks + no. of xar blocks */
	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
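	/*
	 * For example, a 10000-byte file with no extended attribute
	 * record yields howmany(10000, DEV_BSIZE) = 20 512-byte
	 * blocks; any XAR length (in logical blocks) is converted to
	 * bytes and counted as well.
	 */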
	vap->va_seq = hp->hs_seq;
	return (0);
}

/*ARGSUSED*/
static int
hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred,
    caller_context_t *ct)
{
	struct hsnode *hp;

	if (vp->v_type != VLNK)
		return (EINVAL);

	hp = VTOH(vp);

	if (hp->hs_dirent.sym_link == (char *)NULL)
		return (ENOENT);

	return (uiomove(hp->hs_dirent.sym_link,
	    (size_t)MIN(hp->hs_dirent.ext_size,
	    uiop->uio_resid), UIO_READ, uiop));
}

/*ARGSUSED*/
static void
hsfs_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
{
	struct hsnode *hp;
	struct hsfs *fsp;

	int nopage;

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	/*
	 * Note: acquiring and holding v_lock for quite a while
	 * here serializes on the vnode; this is unfortunate, but
	 * likely not to overly impact performance, as the underlying
	 * device (CDROM drive) is quite slow.
	 */
	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
	mutex_enter(&hp->hs_contents_lock);
	mutex_enter(&vp->v_lock);

	if (vp->v_count < 1) {
		panic("hsfs_inactive: v_count < 1");
		/*NOTREACHED*/
	}

	VN_RELE_LOCKED(vp);
	if (vp->v_count > 0 || (hp->hs_flags & HREF) == 0) {
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		rw_exit(&fsp->hsfs_hash_lock);
		return;
	}
	if (vp->v_count == 0) {
		/*
		 * Free the hsnode.
		 * If there are no pages associated with the
		 * hsnode, give it back to the kmem_cache,
		 * else put at the end of this file system's
		 * internal free list.
		 */
		nopage = !vn_has_cached_data(vp);
		hp->hs_flags = 0;
		/*
		 * Exit these locks now, since hs_freenode may
		 * kmem_free the hsnode and embedded vnode.
		 */
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
		hs_freenode(vp, fsp, nopage);
	} else {
		mutex_exit(&vp->v_lock);
		mutex_exit(&hp->hs_contents_lock);
	}
	rw_exit(&fsp->hsfs_hash_lock);
}


/*ARGSUSED*/
static int
hsfs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
    struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cred,
    caller_context_t *ct, int *direntflags, pathname_t *realpnp)
{
	int error;
	int namelen = (int)strlen(nm);

	if (*nm == '\0') {
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	/*
	 * If we're looking for ourself, life is simple.
	 */
	if (namelen == 1 && *nm == '.') {
		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
			return (error);
		VN_HOLD(dvp);
		*vpp = dvp;
		return (0);
	}

	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
}


/*ARGSUSED*/
static int
hsfs_readdir(struct vnode *vp, struct uio *uiop, struct cred *cred, int *eofp,
    caller_context_t *ct, int flags)
{
	struct hsnode *dhp;
	struct hsfs *fsp;
	struct hs_direntry hd;
	struct dirent64 *nd;
	int error;
	uint_t offset;		/* real offset in directory */
	uint_t dirsiz;		/* real size of directory */
	uchar_t *blkp;
	int hdlen;		/* length of hs directory entry */
	long ndlen;		/* length of dirent entry */
	int bytes_wanted;
	size_t bufsize;		/* size of dirent buffer */
	char *outbuf;		/* ptr to dirent buffer */
	char *dname;
	int dnamelen;
	size_t dname_size;
	struct fbuf *fbp;
	uint_t last_offset;	/* last index into current dir block */
	ino64_t dirino;		/* temporary storage before storing in dirent */
	off_t diroff;

	dhp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	if (dhp->hs_dirent.ext_size == 0)
		hs_filldirent(vp, &dhp->hs_dirent);
	dirsiz = dhp->hs_dirent.ext_size;
	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
		if (eofp)
			*eofp = 1;
		return (0);
	}
	ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
	offset = uiop->uio_loffset;

	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
	dname = kmem_alloc(dname_size, KM_SLEEP);
	bufsize = uiop->uio_resid + sizeof (struct dirent64);

	outbuf = kmem_alloc(bufsize, KM_SLEEP);
	nd = (struct dirent64 *)outbuf;

	while (offset < dirsiz) {
		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));

		error = fbread(vp, (offset_t)(offset & MAXBMASK),
		    (unsigned int)bytes_wanted, S_READ, &fbp);
		if (error)
			goto done;

		blkp = (uchar_t *)fbp->fb_addr;
		last_offset = (offset & MAXBMASK) + fbp->fb_count;

#define	rel_offset(offset)	((offset) & MAXBOFFSET)	/* index into blkp */
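
		/*
		 * rel_offset() maps a directory offset to an index into
		 * the current fbuf block; e.g. with MAXBSIZE at 8K, a
		 * directory offset of 10000 lands at byte 1808 of the
		 * block that fbread() returned for offset 8192.
		 */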

		while (offset < last_offset) {
			/*
			 * Very similar validation code is found in
			 * process_dirblock(), hsfs_node.c.
			 * For an explanation, see there.
			 * It may make sense for the future to
			 * "consolidate" the code in hs_parsedir(),
			 * process_dirblock() and hsfs_readdir() into
			 * a single utility function.
			 */
			hdlen = (int)((uchar_t)
			    HDE_DIR_LEN(&blkp[rel_offset(offset)]));
			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
			    offset + hdlen > last_offset) {
				/*
				 * Advance to the next sector boundary.
				 */
				offset = roundup(offset + 1, HS_SECTOR_SIZE);
				if (hdlen)
					hs_log_bogus_disk_warning(fsp,
					    HSFS_ERR_TRAILING_JUNK, 0);

				continue;
			}

			bzero(&hd, sizeof (hd));

			/*
			 * Just ignore invalid directory entries.
			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
			 */
			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
			    &hd, dname, &dnamelen, last_offset - offset)) {
				/*
				 * Determine if there is enough room
				 */
				ndlen = (long)DIRENT64_RECLEN((dnamelen));

				if ((ndlen + ((char *)nd - outbuf)) >
				    uiop->uio_resid) {
					fbrelse(fbp, S_READ);
					goto done; /* output buffer full */
				}

				diroff = offset + hdlen;
				/*
				 * If the media carries rrip-v1.12 or newer,
				 * and we trust the inodes from the rrip data
				 * (use_rrip_inodes != 0), use that data. If the
				 * media has been created by a recent mkisofs
				 * version, we may trust all numbers in the
				 * starting extent number; otherwise, we cannot
				 * do this for zero sized files and symlinks,
				 * because if we did we'd end up mapping all of
				 * them to the same node. We use HS_DUMMY_INO
				 * in this case and make sure that we will not
				 * map all files to the same meta data.
				 */
				if (hd.inode != 0 && use_rrip_inodes) {
					dirino = hd.inode;
				} else if ((hd.ext_size == 0 ||
				    hd.sym_link != (char *)NULL) &&
				    (fsp->hsfs_flags & HSFSMNT_INODE) == 0) {
					dirino = HS_DUMMY_INO;
				} else {
					dirino = hd.ext_lbn;
				}

				/* strncpy(9f) will zero uninitialized bytes */

				ASSERT(strlen(dname) + 1 <=
				    DIRENT64_NAMELEN(ndlen));
				(void) strncpy(nd->d_name, dname,
				    DIRENT64_NAMELEN(ndlen));
				nd->d_reclen = (ushort_t)ndlen;
				nd->d_off = (offset_t)diroff;
				nd->d_ino = dirino;
				nd = (struct dirent64 *)((char *)nd + ndlen);

				/*
				 * Free up space allocated for the symlink.
				 */
				if (hd.sym_link != (char *)NULL) {
					kmem_free(hd.sym_link,
					    (size_t)(hd.ext_size+1));
					hd.sym_link = (char *)NULL;
				}
			}
			offset += hdlen;
		}
		fbrelse(fbp, S_READ);
	}

	/*
	 * Got here for one of the following reasons:
	 * 1) outbuf is full (error == 0)
	 * 2) end of directory reached (error == 0)
	 * 3) error reading directory sector (error != 0)
	 * 4) directory entry crosses sector boundary (error == 0)
	 *
	 * If any directory entries have been copied, don't report
	 * case 4. Instead, return the valid directory entries.
	 *
	 * If no entries have been copied, report the error.
	 * If case 4, this will be indistinguishable from EOF.
	 */
done:
	ndlen = ((char *)nd - outbuf);
	if (ndlen != 0) {
		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
		uiop->uio_loffset = offset;
	}
	kmem_free(dname, dname_size);
	kmem_free(outbuf, bufsize);
	if (eofp && error == 0)
		*eofp = (uiop->uio_loffset >= dirsiz);
	return (error);
}

/*ARGSUSED2*/
static int
hsfs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
{
	struct hsnode *hp;
	struct hsfid *fid;

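	/*
	 * Per the usual VOP_FID() contract, a caller whose fid buffer
	 * is too small gets ENOSPC along with the required length, so
	 * it can retry with a big enough buffer.
	 */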
	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
		return (ENOSPC);
	}

	fid = (struct hsfid *)fidp;
	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
	hp = VTOH(vp);
	mutex_enter(&hp->hs_contents_lock);
	fid->hf_dir_lbn = hp->hs_dir_lbn;
	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
	fid->hf_ino = hp->hs_nodeid;
	mutex_exit(&hp->hs_contents_lock);
	return (0);
}

/*ARGSUSED*/
static int
hsfs_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
{
	return (0);
}

/*ARGSUSED*/
static int
hsfs_close(struct vnode *vp, int flag, int count, offset_t offset,
    struct cred *cred, caller_context_t *ct)
{
	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}

/*ARGSUSED2*/
static int
hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred,
    caller_context_t *ct)
{
	return (hs_access(vp, (mode_t)mode, cred));
}

/*
 * The seek time of a CD-ROM is very slow, and the data transfer
 * rate is even worse (max. 150K per sec).  The design decision is
 * to reduce access to the cd-rom as much as possible, and to
 * transfer a sizable block (read-ahead) of data at a time.
 * The UFS style of reading ahead one block at a time is not
 * appropriate here and is not supported.
 */

/*
 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
 */
#define	KLUSTSIZE	(56 * 1024)
/* we don't support read ahead */
int hsfs_lostpage;	/* no. of times we lost original page */

/*
 * Used to prevent biodone() from releasing buf resources that
 * we didn't allocate in quite the usual way.
 */
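/*
 * The sema_v() of b_io here pairs with a sema_p() by whoever issued
 * the I/O (for instance, the scheduler's strategy code), so completion
 * is signalled without biodone() tearing down a buf it did not set up.
 */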
/*ARGSUSED*/
int
hsfs_iodone(struct buf *bp)
{
	sema_v(&bp->b_io);
	return (0);
}

/*
 * The taskq thread that invokes the scheduling function to ensure
 * that all readaheads are complete, cleans up the associated
 * memory, and releases the page lock.
 */
void
hsfs_ra_task(void *arg)
{
	struct hio_info *info = arg;
	uint_t count;
	struct buf *wbuf;

	ASSERT(info->pp != NULL);

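	/*
	 * For each readahead buf, avoid simply blocking on its
	 * semaphore: keep invoking the scheduler so the queued I/O
	 * (including our own) makes progress, and only block once the
	 * scheduler reports there is nothing left to issue.
	 */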
	for (count = 0; count < info->bufsused; count++) {
		wbuf = &(info->bufs[count]);

		DTRACE_PROBE1(hsfs_io_wait_ra, struct buf *, wbuf);
		while (sema_tryp(&(info->sema[count])) == 0) {
			if (hsched_invoke_strategy(info->fsp)) {
				sema_p(&(info->sema[count]));
				break;
			}
		}
		sema_destroy(&(info->sema[count]));
		DTRACE_PROBE1(hsfs_io_done_ra, struct buf *, wbuf);
		biofini(&(info->bufs[count]));
	}
	for (count = 0; count < info->bufsused; count++) {
		if (info->vas[count] != NULL) {
			ppmapout(info->vas[count]);
		}
	}
	kmem_free(info->vas, info->bufcnt * sizeof (caddr_t));
	kmem_free(info->bufs, info->bufcnt * sizeof (struct buf));
	kmem_free(info->sema, info->bufcnt * sizeof (ksema_t));

	pvn_read_done(info->pp, 0);
	kmem_cache_free(hio_info_cache, info);
}

/*
 * Submit asynchronous readahead requests to the I/O scheduler
 * depending on the number of pages to read ahead. These requests
 * are asynchronous to the calling thread, but I/O requests issued
 * subsequently by other threads with higher LBNs must wait for
 * these readaheads to complete, since we have a single ordered
 * I/O pipeline. Thus these readaheads are semi-asynchronous.
 * A taskq handles waiting for the readaheads to complete.
 *
 * This function is mostly a copy of hsfs_getapage but somewhat
 * simpler. A readahead request is aborted if page allocation
 * fails.
 */
/*ARGSUSED*/
static int
hsfs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg,
    caddr_t addr, struct hsnode *hp, struct hsfs *fsp, int xarsiz,
    offset_t bof, int chunk_lbn_count, int chunk_data_bytes)
{
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	struct vnode *devvp;
	ulong_t byte_offset;
	size_t io_len_tmp;
	uint_t io_off, io_len;
	uint_t xlen;
	uint_t filsiz;
	uint_t secsize;
	uint_t bufcnt;
	uint_t bufsused;
	uint_t count;
	uint_t io_end;
	uint_t which_chunk_lbn;
	uint_t offset_lbn;
	uint_t offset_extra;
	offset_t offset_bytes;
	uint_t remaining_bytes;
	uint_t extension;
	int remainder;			/* must be signed */
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;
	ksema_t *fio_done;
	struct hio_info *info;
	size_t len;

	ASSERT(fsp->hqueue != NULL);

	if (addr >= seg->s_base + seg->s_size) {
		return (-1);
	}

	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;	/* bytes per logical block */

	/* file data size */
	filsiz = hp->hs_dirent.ext_size;

	if (off >= filsiz)
		return (0);

	extension = 0;
	pp = NULL;

	extension += hp->hs_ra_bytes;

	/*
	 * Some CD writers (e.g. Kodak Photo CD writers)
	 * create CDs in TAO mode and reserve tracks that
	 * are not completely written. Some sectors remain
	 * unreadable for this reason and give I/O errors.
	 * Also, there's no point in reading sectors
	 * we'll never look at.  So, if we're asked to go
	 * beyond the end of a file, truncate to the length
	 * of that file.
	 *
	 * Additionally, this behaviour is required by section
	 * 6.4.5 of ISO 9660:1988(E).
	 */
	len = MIN(extension ? extension : PAGESIZE, filsiz - off);

	/* A little paranoia */
	if (len <= 0)
		return (-1);

	/*
	 * After all that, make sure we're asking for things in units
	 * that bdev_strategy() will understand (see bug 4202551).
	 */
	len = roundup(len, DEV_BSIZE);

	pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
	    &io_len_tmp, off, len, 1);

	if (pp == NULL) {
		hp->hs_num_contig = 0;
		hp->hs_ra_bytes = 0;
		hp->hs_prev_offset = 0;
		return (-1);
	}

	io_off = (uint_t)io_off_tmp;
	io_len = (uint_t)io_len_tmp;

	/* check for truncation */
	/*
	 * xxx Clean up and return EIO instead?
	 * xxx Ought to go to u_offset_t for everything, but we
	 * xxx call lots of things that want uint_t arguments.
	 */
	ASSERT(io_off == io_off_tmp);

	/*
	 * Get enough buffers for the worst-case scenario
	 * (i.e., no coalescing possible).
	 */
	bufcnt = (len + secsize - 1) / secsize;
	bufs = kmem_alloc(bufcnt * sizeof (struct buf), KM_SLEEP);
	vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);

	/*
	 * Allocate an array of semaphores since we are doing I/O
	 * scheduling.
	 */
	fio_done = kmem_alloc(bufcnt * sizeof (ksema_t), KM_SLEEP);

	/*
	 * If our filesize is not an integer multiple of PAGESIZE,
	 * we zero that part of the last page that's between EOF and
	 * the PAGESIZE boundary.
	 */
	xlen = io_len & PAGEOFFSET;
	if (xlen != 0)
		pagezero(pp->p_prev, xlen, PAGESIZE - xlen);

	DTRACE_PROBE2(hsfs_readahead, struct vnode *, vp, uint_t, io_len);

	va = NULL;
	lastp = NULL;
	searchp = pp;
	io_end = io_off + io_len;
	for (count = 0, byte_offset = io_off;
	    byte_offset < io_end;
	    count++) {
		ASSERT(count < bufcnt);

		bioinit(&bufs[count]);
		bufs[count].b_edev = devvp->v_rdev;
		bufs[count].b_dev = cmpdev(devvp->v_rdev);
		bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
		bufs[count].b_iodone = hsfs_iodone;
		bufs[count].b_vp = vp;
		bufs[count].b_file = vp;

		/* Compute disk address for interleaving. */

		/* considered without skips */
		which_chunk_lbn = byte_offset / chunk_data_bytes;

		/* factor in skips */
		offset_lbn = which_chunk_lbn * chunk_lbn_count;

		/* convert to physical byte offset for lbn */
		offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);

		/* don't forget offset into lbn */
		offset_extra = byte_offset % chunk_data_bytes;

		/* get virtual block number for driver */
		driver_block = lbtodb(bof + xarsiz
		    + offset_bytes + offset_extra);
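
		/*
		 * Worked example: with 2048-byte LBNs, an interleave
		 * of 2 data LBNs followed by 1 skip LBN gives
		 * chunk_data_bytes = 4096 and chunk_lbn_count = 3.
		 * A byte_offset of 10000 is in data chunk 2, so
		 * offset_lbn = 6, offset_bytes = 12288 and
		 * offset_extra = 1808; the driver block is derived
		 * from bof + xarsiz + 14096.
		 */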

		if (lastp != searchp) {
			/* this branch taken first time through loop */
			va = vas[count] = ppmapin(searchp, PROT_WRITE,
			    (caddr_t)-1);
			/* ppmapin() guarantees not to return NULL */
		} else {
			vas[count] = NULL;
		}

		bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
		bufs[count].b_offset =
		    (offset_t)(byte_offset - io_off + off);

		/*
		 * We specifically use the b_lblkno member here
		 * as even in the 32 bit world driver_block can
		 * get very large in line with the ISO9660 spec.
		 */

		bufs[count].b_lblkno = driver_block;

		remaining_bytes = ((which_chunk_lbn + 1) * chunk_data_bytes)
		    - byte_offset;

		/*
		 * remaining_bytes can't be zero, as we derived
		 * which_chunk_lbn directly from byte_offset.
		 */
		if ((remaining_bytes + byte_offset) < (off + len)) {
			/* coalesce-read the rest of the chunk */
			bufs[count].b_bcount = remaining_bytes;
		} else {
			/* get the final bits */
			bufs[count].b_bcount = off + len - byte_offset;
		}

		remainder = PAGESIZE - (byte_offset % PAGESIZE);
		if (bufs[count].b_bcount > remainder) {
			bufs[count].b_bcount = remainder;
		}

		bufs[count].b_bufsize = bufs[count].b_bcount;
		if (((offset_t)byte_offset + bufs[count].b_bcount) >
		    HS_MAXFILEOFF) {
			break;
		}
		byte_offset += bufs[count].b_bcount;

		/*
		 * We are scheduling I/O so we need to enqueue
		 * requests rather than calling bdev_strategy
		 * here. A later invocation of the scheduling
		 * function will take care of doing the actual
		 * I/O as it selects requests from the queue as
		 * per the scheduling logic.
		 */
		struct hio *hsio = kmem_cache_alloc(hio_cache,
		    KM_SLEEP);

		sema_init(&fio_done[count], 0, NULL,
		    SEMA_DEFAULT, NULL);
		hsio->bp = &bufs[count];
		hsio->sema = &fio_done[count];
		hsio->io_lblkno = bufs[count].b_lblkno;
		hsio->nblocks = howmany(hsio->bp->b_bcount,
		    DEV_BSIZE);

		/* used for deadline */
		hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());

		/* for I/O coalescing */
		hsio->contig_chain = NULL;
		hsched_enqueue_io(fsp, hsio, 1);

		lwp_stat_update(LWP_STAT_INBLK, 1);
		lastp = searchp;
		if ((remainder - bufs[count].b_bcount) < 1) {
			searchp = searchp->p_next;
		}
	}

	bufsused = count;
	info = kmem_cache_alloc(hio_info_cache, KM_SLEEP);
	info->bufs = bufs;
	info->vas = vas;
	info->sema = fio_done;
	info->bufsused = bufsused;
	info->bufcnt = bufcnt;
	info->fsp = fsp;
	info->pp = pp;

	(void) taskq_dispatch(fsp->hqueue->ra_task,
	    hsfs_ra_task, info, KM_SLEEP);
	/*
	 * The I/O locked pages are unlocked in our taskq thread.
	 */
	return (0);
}

/*
 * Each file may have a different interleaving on disk. This makes
 * things somewhat interesting. The gist is that there are some
 * number of contiguous data sectors, followed by some other number
 * of contiguous skip sectors. The sum of those two sets of sectors
 * defines the interleave size. Unfortunately, it means that we generally
 * can't simply read N sectors starting at a given offset to satisfy
 * any given request.
 *
 * What we do is get the relevant memory pages via pvn_read_kluster(),
 * then stride through the interleaves, setting up a buf for each
 * sector that needs to be brought in. Instead of kmem_alloc'ing
 * space for the sectors, though, we just point at the appropriate
 * spot in the relevant page for each of them. This saves us a bunch
 * of copying.
 *
 * NOTICE: The code below in hsfs_getapage is mostly the same as the
 * code in hsfs_getpage_ra above (with some omissions). If you are
 * making any change to this function, please also look at
 * hsfs_getpage_ra.
 */
/*ARGSUSED*/
static int
hsfs_getapage(struct vnode *vp, u_offset_t off, size_t len, uint_t *protp,
    struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cred)
{
	struct hsnode *hp;
	struct hsfs *fsp;
	int err;
	struct buf *bufs;
	caddr_t *vas;
	caddr_t va;
	struct page *pp, *searchp, *lastp;
	page_t *pagefound;
	offset_t bof;
	struct vnode *devvp;
	ulong_t byte_offset;
	size_t io_len_tmp;
	uint_t io_off, io_len;
	uint_t xlen;
	uint_t filsiz;
	uint_t secsize;
	uint_t bufcnt;
	uint_t bufsused;
	uint_t count;
	uint_t io_end;
	uint_t which_chunk_lbn;
	uint_t offset_lbn;
	uint_t offset_extra;
	offset_t offset_bytes;
	uint_t remaining_bytes;
	uint_t extension;
	int remainder;			/* must be signed */
	int chunk_lbn_count;
	int chunk_data_bytes;
	int xarsiz;
	diskaddr_t driver_block;
	u_offset_t io_off_tmp;
	ksema_t *fio_done;
	int calcdone;

	/*
	 * We don't support asynchronous operation at the moment, so
	 * just pretend we did it. If the pages are ever actually
	 * needed, they'll get brought in then.
	 */
	if (pl == NULL)
		return (0);

	hp = VTOH(vp);
	fsp = VFS_TO_HSFS(vp->v_vfsp);
	devvp = fsp->hsfs_devvp;
	secsize = fsp->hsfs_vol.lbn_size;	/* bytes per logical block */

	/* file data size */
	filsiz = hp->hs_dirent.ext_size;

	/* disk addr for start of file */
	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);

	/* xarsiz byte must be skipped for data */
	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;

	/* how many logical blocks in an interleave (data+skip) */
	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;

	if (chunk_lbn_count == 0) {
		chunk_lbn_count = 1;
	}

	/*
	 * Convert interleaving size into bytes. The zero case
	 * (no interleaving) optimization is handled as a side-
	 * effect of the read-ahead logic.
	 */
	if (hp->hs_dirent.intlf_sz == 0) {
		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
		/*
		 * Optimization: If our pagesize is a multiple of LBN
		 * bytes, we can avoid breaking up a page into individual
		 * lbn-sized requests.
		 */
		if (PAGESIZE % chunk_data_bytes == 0) {
			chunk_lbn_count = BYTE_TO_LBN(PAGESIZE, vp->v_vfsp);
			chunk_data_bytes = PAGESIZE;
		}
	} else {
		chunk_data_bytes =
		    LBN_TO_BYTE(hp->hs_dirent.intlf_sz, vp->v_vfsp);
	}
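
	/*
	 * For example, with 2048-byte LBNs and 8K pages, an
	 * uninterleaved file ends up with chunk_data_bytes = PAGESIZE
	 * and chunk_lbn_count = 4, so each page can be read with a
	 * single request instead of four lbn-sized ones.
	 */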

reread:
	err = 0;
	pagefound = 0;
	calcdone = 0;

	/*
	 * Do some read-ahead. This mostly saves us a bit of
	 * system cpu time more than anything else when doing
	 * sequential reads. At some point, could do the
	 * read-ahead asynchronously which might gain us something
	 * on wall time, but it seems unlikely....
	 *
	 * We do the easy case here, which is to read through
	 * the end of the chunk, minus whatever's at the end that
	 * won't exactly fill a page.
	 */
	if (hp->hs_ra_bytes > 0 && chunk_data_bytes != PAGESIZE) {
		which_chunk_lbn = (off + len) / chunk_data_bytes;
		extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
		extension -= (extension % PAGESIZE);
	} else {
		extension = roundup(len, PAGESIZE);
	}
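
	/*
	 * E.g. with chunk_data_bytes = 16K and an 8K PAGESIZE, a
	 * request at off = 0 of len = 1000 yields extension = 16384:
	 * read ahead to the end of the chunk, trimmed down to a page
	 * multiple.
	 */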
11038cd7c4fcSpeterte
110484b82766Smg atomic_inc_64(&fsp->total_pages_requested);
11057c478bd9Sstevel@tonic-gate
11067c478bd9Sstevel@tonic-gate pp = NULL;
11077c478bd9Sstevel@tonic-gate again:
11087c478bd9Sstevel@tonic-gate /* search for page in buffer */
11097c478bd9Sstevel@tonic-gate if ((pagefound = page_exists(vp, off)) == 0) {
11107c478bd9Sstevel@tonic-gate /*
11117c478bd9Sstevel@tonic-gate * Need to really do disk IO to get the page.
11127c478bd9Sstevel@tonic-gate */
111384b82766Smg if (!calcdone) {
111484b82766Smg extension += hp->hs_ra_bytes;
111584b82766Smg
111684b82766Smg /*
111784b82766Smg * Some cd writers don't write sectors that aren't
111884b82766Smg * used. Also, there's no point in reading sectors
111984b82766Smg * we'll never look at. So, if we're asked to go
112084b82766Smg * beyond the end of a file, truncate to the length
112184b82766Smg * of that file.
112284b82766Smg *
112384b82766Smg * Additionally, this behaviour is required by section
112484b82766Smg * 6.4.5 of ISO 9660:1988(E).
112584b82766Smg */
112684b82766Smg len = MIN(extension ? extension : PAGESIZE,
112784b82766Smg filsiz - off);
112884b82766Smg
112984b82766Smg /* A little paranoia. */
113084b82766Smg ASSERT(len > 0);
113184b82766Smg
113284b82766Smg /*
113384b82766Smg * After all that, make sure we're asking for things
113484b82766Smg * in units that bdev_strategy() will understand
113584b82766Smg * (see bug 4202551).
113684b82766Smg */
113784b82766Smg len = roundup(len, DEV_BSIZE);
113884b82766Smg calcdone = 1;
113984b82766Smg }
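		/*
		 * Example of the truncation and rounding above
		 * (hypothetical sizes): for a 10000-byte file with
		 * off = 8192 and extension = 8192, len becomes
		 * MIN(8192, 10000 - 8192) = 1808, which
		 * roundup(1808, DEV_BSIZE) turns into 2048 with the
		 * usual 512-byte DEV_BSIZE, a size bdev_strategy()
		 * can handle.
		 */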
114084b82766Smg
11417c478bd9Sstevel@tonic-gate pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
11427c478bd9Sstevel@tonic-gate &io_len_tmp, off, len, 0);
11437c478bd9Sstevel@tonic-gate
114484b82766Smg if (pp == NULL) {
114584b82766Smg /*
114684b82766Smg * Pressure on memory, roll back readahead
114784b82766Smg */
114884b82766Smg hp->hs_num_contig = 0;
114984b82766Smg hp->hs_ra_bytes = 0;
115084b82766Smg hp->hs_prev_offset = 0;
11517c478bd9Sstevel@tonic-gate goto again;
115284b82766Smg }
11537c478bd9Sstevel@tonic-gate
11547c478bd9Sstevel@tonic-gate io_off = (uint_t)io_off_tmp;
11557c478bd9Sstevel@tonic-gate io_len = (uint_t)io_len_tmp;
11567c478bd9Sstevel@tonic-gate
11577c478bd9Sstevel@tonic-gate /* check for truncation */
11587c478bd9Sstevel@tonic-gate /*
11597c478bd9Sstevel@tonic-gate * xxx Clean up and return EIO instead?
11607c478bd9Sstevel@tonic-gate * xxx Ought to go to u_offset_t for everything, but we
11617c478bd9Sstevel@tonic-gate * xxx call lots of things that want uint_t arguments.
11627c478bd9Sstevel@tonic-gate */
11637c478bd9Sstevel@tonic-gate ASSERT(io_off == io_off_tmp);
11647c478bd9Sstevel@tonic-gate
11657c478bd9Sstevel@tonic-gate /*
11667c478bd9Sstevel@tonic-gate * get enough buffers for worst-case scenario
11677c478bd9Sstevel@tonic-gate * (i.e., no coalescing possible).
11687c478bd9Sstevel@tonic-gate */
11697c478bd9Sstevel@tonic-gate bufcnt = (len + secsize - 1) / secsize;
11707c478bd9Sstevel@tonic-gate bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
11717c478bd9Sstevel@tonic-gate vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
117284b82766Smg
117384b82766Smg /*
117484b82766Smg 		 * Allocate an array of semaphores if we are doing I/O
117584b82766Smg * scheduling.
117684b82766Smg */
117784b82766Smg if (fsp->hqueue != NULL)
117884b82766Smg fio_done = kmem_alloc(bufcnt * sizeof (ksema_t),
117984b82766Smg KM_SLEEP);
11807c478bd9Sstevel@tonic-gate for (count = 0; count < bufcnt; count++) {
118184b82766Smg bioinit(&bufs[count]);
11827c478bd9Sstevel@tonic-gate bufs[count].b_edev = devvp->v_rdev;
11837c478bd9Sstevel@tonic-gate bufs[count].b_dev = cmpdev(devvp->v_rdev);
11847c478bd9Sstevel@tonic-gate bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
11857c478bd9Sstevel@tonic-gate bufs[count].b_iodone = hsfs_iodone;
11867c478bd9Sstevel@tonic-gate bufs[count].b_vp = vp;
11877c478bd9Sstevel@tonic-gate bufs[count].b_file = vp;
11887c478bd9Sstevel@tonic-gate }
11897c478bd9Sstevel@tonic-gate
11908cd7c4fcSpeterte /*
11918cd7c4fcSpeterte * If our filesize is not an integer multiple of PAGESIZE,
11928cd7c4fcSpeterte * we zero that part of the last page that's between EOF and
11938cd7c4fcSpeterte * the PAGESIZE boundary.
11948cd7c4fcSpeterte */
11957c478bd9Sstevel@tonic-gate xlen = io_len & PAGEOFFSET;
11967c478bd9Sstevel@tonic-gate if (xlen != 0)
11977c478bd9Sstevel@tonic-gate pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
11987c478bd9Sstevel@tonic-gate
11997c478bd9Sstevel@tonic-gate va = NULL;
12007c478bd9Sstevel@tonic-gate lastp = NULL;
12017c478bd9Sstevel@tonic-gate searchp = pp;
12027c478bd9Sstevel@tonic-gate io_end = io_off + io_len;
12037c478bd9Sstevel@tonic-gate for (count = 0, byte_offset = io_off;
1204d10b6702Sfrankho byte_offset < io_end; count++) {
12057c478bd9Sstevel@tonic-gate ASSERT(count < bufcnt);
12067c478bd9Sstevel@tonic-gate
12077c478bd9Sstevel@tonic-gate /* Compute disk address for interleaving. */
12087c478bd9Sstevel@tonic-gate
12097c478bd9Sstevel@tonic-gate /* considered without skips */
12107c478bd9Sstevel@tonic-gate which_chunk_lbn = byte_offset / chunk_data_bytes;
12117c478bd9Sstevel@tonic-gate
12127c478bd9Sstevel@tonic-gate /* factor in skips */
12137c478bd9Sstevel@tonic-gate offset_lbn = which_chunk_lbn * chunk_lbn_count;
12147c478bd9Sstevel@tonic-gate
12157c478bd9Sstevel@tonic-gate /* convert to physical byte offset for lbn */
12167c478bd9Sstevel@tonic-gate offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
12177c478bd9Sstevel@tonic-gate
12187c478bd9Sstevel@tonic-gate /* don't forget offset into lbn */
12197c478bd9Sstevel@tonic-gate offset_extra = byte_offset % chunk_data_bytes;
12207c478bd9Sstevel@tonic-gate
12217c478bd9Sstevel@tonic-gate /* get virtual block number for driver */
1222d10b6702Sfrankho driver_block =
1223d10b6702Sfrankho lbtodb(bof + xarsiz + offset_bytes + offset_extra);
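			/*
			 * Sketch of the address math with made-up numbers:
			 * an interleave of two 2048-byte lbns with no gap
			 * gives chunk_lbn_count = 2 and chunk_data_bytes =
			 * 4096. For byte_offset = 10000:
			 * which_chunk_lbn = 10000 / 4096 = 2,
			 * offset_lbn = 2 * 2 = 4, offset_bytes = 8192,
			 * offset_extra = 10000 % 4096 = 1808, so
			 * driver_block addresses device byte
			 * bof + xarsiz + 8192 + 1808.
			 */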
12247c478bd9Sstevel@tonic-gate
12257c478bd9Sstevel@tonic-gate if (lastp != searchp) {
12267c478bd9Sstevel@tonic-gate /* this branch taken first time through loop */
1227d10b6702Sfrankho va = vas[count] =
1228d10b6702Sfrankho ppmapin(searchp, PROT_WRITE, (caddr_t)-1);
12297c478bd9Sstevel@tonic-gate /* ppmapin() guarantees not to return NULL */
12307c478bd9Sstevel@tonic-gate } else {
12317c478bd9Sstevel@tonic-gate vas[count] = NULL;
12327c478bd9Sstevel@tonic-gate }
12337c478bd9Sstevel@tonic-gate
12347c478bd9Sstevel@tonic-gate bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
12357c478bd9Sstevel@tonic-gate bufs[count].b_offset =
12367c478bd9Sstevel@tonic-gate (offset_t)(byte_offset - io_off + off);
12377c478bd9Sstevel@tonic-gate
12387c478bd9Sstevel@tonic-gate /*
12397c478bd9Sstevel@tonic-gate * We specifically use the b_lblkno member here
12407c478bd9Sstevel@tonic-gate 			 * as even in the 32-bit world driver_block can
12417c478bd9Sstevel@tonic-gate 			 * get very large, in line with the ISO 9660 spec.
12427c478bd9Sstevel@tonic-gate */
12437c478bd9Sstevel@tonic-gate
12447c478bd9Sstevel@tonic-gate bufs[count].b_lblkno = driver_block;
12457c478bd9Sstevel@tonic-gate
1246d10b6702Sfrankho remaining_bytes =
1247d10b6702Sfrankho ((which_chunk_lbn + 1) * chunk_data_bytes)
1248d10b6702Sfrankho - byte_offset;
12497c478bd9Sstevel@tonic-gate
12507c478bd9Sstevel@tonic-gate /*
12517c478bd9Sstevel@tonic-gate * remaining_bytes can't be zero, as we derived
12527c478bd9Sstevel@tonic-gate * which_chunk_lbn directly from byte_offset.
12537c478bd9Sstevel@tonic-gate */
12549cbc422eSpeterte if ((remaining_bytes + byte_offset) < (off + len)) {
12557c478bd9Sstevel@tonic-gate /* coalesce-read the rest of the chunk */
12567c478bd9Sstevel@tonic-gate bufs[count].b_bcount = remaining_bytes;
12577c478bd9Sstevel@tonic-gate } else {
12587c478bd9Sstevel@tonic-gate /* get the final bits */
12597c478bd9Sstevel@tonic-gate bufs[count].b_bcount = off + len - byte_offset;
12607c478bd9Sstevel@tonic-gate }
12617c478bd9Sstevel@tonic-gate
12627c478bd9Sstevel@tonic-gate /*
12637c478bd9Sstevel@tonic-gate * It would be nice to do multiple pages'
12647c478bd9Sstevel@tonic-gate * worth at once here when the opportunity
12657c478bd9Sstevel@tonic-gate * arises, as that has been shown to improve
12667c478bd9Sstevel@tonic-gate * our wall time. However, to do that
12677c478bd9Sstevel@tonic-gate * requires that we use the pageio subsystem,
12687c478bd9Sstevel@tonic-gate * which doesn't mix well with what we're
12697c478bd9Sstevel@tonic-gate * already using here. We can't use pageio
12707c478bd9Sstevel@tonic-gate * all the time, because that subsystem
12717c478bd9Sstevel@tonic-gate * assumes that a page is stored in N
12727c478bd9Sstevel@tonic-gate * contiguous blocks on the device.
12737c478bd9Sstevel@tonic-gate * Interleaving violates that assumption.
127484b82766Smg *
127584b82766Smg * Update: This is now not so big a problem
127684b82766Smg * because of the I/O scheduler sitting below
127784b82766Smg * that can re-order and coalesce I/O requests.
12787c478bd9Sstevel@tonic-gate */
12797c478bd9Sstevel@tonic-gate
12807c478bd9Sstevel@tonic-gate remainder = PAGESIZE - (byte_offset % PAGESIZE);
12817c478bd9Sstevel@tonic-gate if (bufs[count].b_bcount > remainder) {
12827c478bd9Sstevel@tonic-gate bufs[count].b_bcount = remainder;
12837c478bd9Sstevel@tonic-gate }
12847c478bd9Sstevel@tonic-gate
12857c478bd9Sstevel@tonic-gate bufs[count].b_bufsize = bufs[count].b_bcount;
12869cbc422eSpeterte if (((offset_t)byte_offset + bufs[count].b_bcount) >
1287d10b6702Sfrankho HS_MAXFILEOFF) {
12889cbc422eSpeterte break;
12899cbc422eSpeterte }
12907c478bd9Sstevel@tonic-gate byte_offset += bufs[count].b_bcount;
12917c478bd9Sstevel@tonic-gate
129284b82766Smg if (fsp->hqueue == NULL) {
129384b82766Smg (void) bdev_strategy(&bufs[count]);
129484b82766Smg
129584b82766Smg } else {
129684b82766Smg /*
129784b82766Smg * We are scheduling I/O so we need to enqueue
129884b82766Smg * requests rather than calling bdev_strategy
129984b82766Smg * here. A later invocation of the scheduling
130084b82766Smg * function will take care of doing the actual
130184b82766Smg * I/O as it selects requests from the queue as
130284b82766Smg * per the scheduling logic.
130384b82766Smg */
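			/*
			 * Each enqueued request carries its own semaphore
			 * (fio_done[count], initialized just below); the
			 * "wait for everything to come in" loop later in
			 * this function blocks on it until the scheduler
			 * has issued and completed this buf.
			 */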
130484b82766Smg struct hio *hsio = kmem_cache_alloc(hio_cache,
130584b82766Smg KM_SLEEP);
130684b82766Smg
130784b82766Smg sema_init(&fio_done[count], 0, NULL,
130884b82766Smg SEMA_DEFAULT, NULL);
130984b82766Smg hsio->bp = &bufs[count];
131084b82766Smg hsio->sema = &fio_done[count];
131184b82766Smg hsio->io_lblkno = bufs[count].b_lblkno;
131284b82766Smg hsio->nblocks = howmany(hsio->bp->b_bcount,
131384b82766Smg DEV_BSIZE);
131484b82766Smg
131584b82766Smg /* used for deadline */
131684b82766Smg hsio->io_timestamp =
131784b82766Smg drv_hztousec(ddi_get_lbolt());
131884b82766Smg
131984b82766Smg /* for I/O coalescing */
132084b82766Smg hsio->contig_chain = NULL;
132184b82766Smg hsched_enqueue_io(fsp, hsio, 0);
132284b82766Smg }
13237c478bd9Sstevel@tonic-gate
13247c478bd9Sstevel@tonic-gate lwp_stat_update(LWP_STAT_INBLK, 1);
13257c478bd9Sstevel@tonic-gate lastp = searchp;
13267c478bd9Sstevel@tonic-gate if ((remainder - bufs[count].b_bcount) < 1) {
13277c478bd9Sstevel@tonic-gate searchp = searchp->p_next;
13287c478bd9Sstevel@tonic-gate }
13297c478bd9Sstevel@tonic-gate }
13307c478bd9Sstevel@tonic-gate
13317c478bd9Sstevel@tonic-gate bufsused = count;
13327c478bd9Sstevel@tonic-gate /* Now wait for everything to come in */
133384b82766Smg if (fsp->hqueue == NULL) {
133484b82766Smg for (count = 0; count < bufsused; count++) {
133584b82766Smg if (err == 0) {
133684b82766Smg err = biowait(&bufs[count]);
133784b82766Smg } else
133884b82766Smg (void) biowait(&bufs[count]);
133984b82766Smg }
134084b82766Smg } else {
134184b82766Smg for (count = 0; count < bufsused; count++) {
134284b82766Smg struct buf *wbuf;
134384b82766Smg
134484b82766Smg /*
134584b82766Smg 				 * Invoke the scheduling function until our buf
134684b82766Smg 				 * is processed. In doing so it might also
134784b82766Smg 				 * process bufs enqueued by other threads,
134884b82766Smg 				 * which is good.
134984b82766Smg */
135084b82766Smg wbuf = &bufs[count];
135184b82766Smg DTRACE_PROBE1(hsfs_io_wait, struct buf *, wbuf);
135284b82766Smg while (sema_tryp(&fio_done[count]) == 0) {
135384b82766Smg /*
135484b82766Smg * hsched_invoke_strategy will return 1
135584b82766Smg * if the I/O queue is empty. This means
135684b82766Smg 					 * that there is another thread that has
135784b82766Smg * issued our buf and is waiting. So we
135884b82766Smg * just block instead of spinning.
135984b82766Smg */
136084b82766Smg if (hsched_invoke_strategy(fsp)) {
136184b82766Smg sema_p(&fio_done[count]);
136284b82766Smg break;
136384b82766Smg }
136484b82766Smg }
136584b82766Smg sema_destroy(&fio_done[count]);
136684b82766Smg DTRACE_PROBE1(hsfs_io_done, struct buf *, wbuf);
136784b82766Smg
136884b82766Smg if (err == 0) {
136984b82766Smg err = geterror(wbuf);
137084b82766Smg }
137184b82766Smg }
137284b82766Smg kmem_free(fio_done, bufcnt * sizeof (ksema_t));
13737c478bd9Sstevel@tonic-gate }
13747c478bd9Sstevel@tonic-gate
13757c478bd9Sstevel@tonic-gate /* Don't leak resources */
13767c478bd9Sstevel@tonic-gate for (count = 0; count < bufcnt; count++) {
137784b82766Smg biofini(&bufs[count]);
13787c478bd9Sstevel@tonic-gate if (count < bufsused && vas[count] != NULL) {
13797c478bd9Sstevel@tonic-gate ppmapout(vas[count]);
13807c478bd9Sstevel@tonic-gate }
13817c478bd9Sstevel@tonic-gate }
13827c478bd9Sstevel@tonic-gate
13837c478bd9Sstevel@tonic-gate kmem_free(vas, bufcnt * sizeof (caddr_t));
13847c478bd9Sstevel@tonic-gate kmem_free(bufs, bufcnt * sizeof (struct buf));
13857c478bd9Sstevel@tonic-gate }
13867c478bd9Sstevel@tonic-gate
13877c478bd9Sstevel@tonic-gate if (err) {
13887c478bd9Sstevel@tonic-gate pvn_read_done(pp, B_ERROR);
13897c478bd9Sstevel@tonic-gate return (err);
13907c478bd9Sstevel@tonic-gate }
13917c478bd9Sstevel@tonic-gate
13927c478bd9Sstevel@tonic-gate /*
13937c478bd9Sstevel@tonic-gate * Lock the requested page, and the one after it if possible.
13947c478bd9Sstevel@tonic-gate * Don't bother if our caller hasn't given us a place to stash
13957c478bd9Sstevel@tonic-gate * the page pointers, since otherwise we'd lock pages that would
13967c478bd9Sstevel@tonic-gate * never get unlocked.
13977c478bd9Sstevel@tonic-gate */
13987c478bd9Sstevel@tonic-gate if (pagefound) {
13997c478bd9Sstevel@tonic-gate int index;
14007c478bd9Sstevel@tonic-gate ulong_t soff;
14017c478bd9Sstevel@tonic-gate
14027c478bd9Sstevel@tonic-gate /*
14037c478bd9Sstevel@tonic-gate * Make sure it's in memory before we say it's here.
14047c478bd9Sstevel@tonic-gate */
14057c478bd9Sstevel@tonic-gate if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
14067c478bd9Sstevel@tonic-gate hsfs_lostpage++;
14077c478bd9Sstevel@tonic-gate goto reread;
14087c478bd9Sstevel@tonic-gate }
14097c478bd9Sstevel@tonic-gate
14107c478bd9Sstevel@tonic-gate pl[0] = pp;
14117c478bd9Sstevel@tonic-gate index = 1;
141284b82766Smg atomic_inc_64(&fsp->cache_read_pages);
14137c478bd9Sstevel@tonic-gate
14147c478bd9Sstevel@tonic-gate /*
14157c478bd9Sstevel@tonic-gate * Try to lock the next page, if it exists, without
14167c478bd9Sstevel@tonic-gate * blocking.
14177c478bd9Sstevel@tonic-gate */
14187c478bd9Sstevel@tonic-gate plsz -= PAGESIZE;
14197c478bd9Sstevel@tonic-gate /* LINTED (plsz is unsigned) */
14207c478bd9Sstevel@tonic-gate for (soff = off + PAGESIZE; plsz > 0;
14217c478bd9Sstevel@tonic-gate soff += PAGESIZE, plsz -= PAGESIZE) {
14227c478bd9Sstevel@tonic-gate pp = page_lookup_nowait(vp, (u_offset_t)soff,
1423d10b6702Sfrankho SE_SHARED);
14247c478bd9Sstevel@tonic-gate if (pp == NULL)
14257c478bd9Sstevel@tonic-gate break;
14267c478bd9Sstevel@tonic-gate pl[index++] = pp;
14277c478bd9Sstevel@tonic-gate }
14287c478bd9Sstevel@tonic-gate pl[index] = NULL;
142984b82766Smg
143084b82766Smg /*
143184b82766Smg * Schedule a semi-asynchronous readahead if we are
143284b82766Smg * accessing the last cached page for the current
143384b82766Smg * file.
143484b82766Smg *
143584b82766Smg * Doing this here means that readaheads will be
143684b82766Smg * issued only if cache-hits occur. This is an advantage
143784b82766Smg * since cache-hits would mean that readahead is giving
143884b82766Smg * the desired benefit. If cache-hits do not occur there
143984b82766Smg * is no point in reading ahead of time - the system
144084b82766Smg * is loaded anyway.
144184b82766Smg */
144284b82766Smg if (fsp->hqueue != NULL &&
144384b82766Smg hp->hs_prev_offset - off == PAGESIZE &&
144484b82766Smg hp->hs_prev_offset < filsiz &&
144584b82766Smg hp->hs_ra_bytes > 0 &&
144684b82766Smg !page_exists(vp, hp->hs_prev_offset)) {
144784b82766Smg (void) hsfs_getpage_ra(vp, hp->hs_prev_offset, seg,
144884b82766Smg addr + PAGESIZE, hp, fsp, xarsiz, bof,
144984b82766Smg chunk_lbn_count, chunk_data_bytes);
145084b82766Smg }
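		/*
		 * Illustrative scenario: an application reading one page
		 * at a time hits the cached page at 'off' while
		 * hs_prev_offset already points one page further into the
		 * file; if that next page is not yet cached and a
		 * readahead quantum has accumulated, one semi-async
		 * hsfs_getpage_ra() call is issued above, ahead of the
		 * application's next request.
		 */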
145184b82766Smg
14527c478bd9Sstevel@tonic-gate return (0);
14537c478bd9Sstevel@tonic-gate }
14547c478bd9Sstevel@tonic-gate
14557c478bd9Sstevel@tonic-gate if (pp != NULL) {
14567c478bd9Sstevel@tonic-gate pvn_plist_init(pp, pl, plsz, off, io_len, rw);
14577c478bd9Sstevel@tonic-gate }
14587c478bd9Sstevel@tonic-gate
14597c478bd9Sstevel@tonic-gate return (err);
14607c478bd9Sstevel@tonic-gate }
14617c478bd9Sstevel@tonic-gate
1462da6c28aaSamw /*ARGSUSED*/
14637c478bd9Sstevel@tonic-gate static int
1464*ade42b55SSebastien Roy hsfs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
1465*ade42b55SSebastien Roy struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
1466*ade42b55SSebastien Roy enum seg_rw rw, struct cred *cred, caller_context_t *ct)
14677c478bd9Sstevel@tonic-gate {
14687c478bd9Sstevel@tonic-gate uint_t filsiz;
146984b82766Smg struct hsfs *fsp;
147084b82766Smg struct hsnode *hp;
147184b82766Smg
147284b82766Smg fsp = VFS_TO_HSFS(vp->v_vfsp);
147384b82766Smg hp = VTOH(vp);
14747c478bd9Sstevel@tonic-gate
14757c478bd9Sstevel@tonic-gate /* does not support write */
14767c478bd9Sstevel@tonic-gate if (rw == S_WRITE) {
1477de4ddf9cSKeith M Wesolowski return (EROFS);
14787c478bd9Sstevel@tonic-gate }
14797c478bd9Sstevel@tonic-gate
14807c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP) {
14817c478bd9Sstevel@tonic-gate return (ENOSYS);
14827c478bd9Sstevel@tonic-gate }
14837c478bd9Sstevel@tonic-gate
14849cbc422eSpeterte ASSERT(off <= HS_MAXFILEOFF);
14857c478bd9Sstevel@tonic-gate
14867c478bd9Sstevel@tonic-gate /*
14877c478bd9Sstevel@tonic-gate * Determine file data size for EOF check.
14887c478bd9Sstevel@tonic-gate */
14897c478bd9Sstevel@tonic-gate filsiz = hp->hs_dirent.ext_size;
14907c478bd9Sstevel@tonic-gate if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
14917c478bd9Sstevel@tonic-gate return (EFAULT); /* beyond EOF */
14927c478bd9Sstevel@tonic-gate
149384b82766Smg /*
149484b82766Smg * Async Read-ahead computation.
149584b82766Smg * This attempts to detect sequential access pattern and
149684b82766Smg * enables reading extra pages ahead of time.
149784b82766Smg */
149884b82766Smg if (fsp->hqueue != NULL) {
149984b82766Smg /*
150084b82766Smg * This check for sequential access also takes into
150184b82766Smg * account segmap weirdness when reading in chunks
150284b82766Smg * less than the segmap size of 8K.
150384b82766Smg */
150484b82766Smg if (hp->hs_prev_offset == off || (off <
150584b82766Smg hp->hs_prev_offset && off + MAX(len, PAGESIZE)
150684b82766Smg >= hp->hs_prev_offset)) {
150784b82766Smg if (hp->hs_num_contig <
150884b82766Smg (seq_contig_requests - 1)) {
150984b82766Smg hp->hs_num_contig++;
151084b82766Smg
151184b82766Smg } else {
151284b82766Smg /*
151384b82766Smg 				 * We increase the readahead quantum until
151484b82766Smg 				 * a predefined max. max_ra_bytes
151584b82766Smg 				 * is a multiple of PAGESIZE.
151684b82766Smg */
151784b82766Smg if (hp->hs_ra_bytes <
151884b82766Smg fsp->hqueue->max_ra_bytes) {
151984b82766Smg hp->hs_ra_bytes += PAGESIZE;
152084b82766Smg }
152184b82766Smg }
152284b82766Smg } else {
152384b82766Smg /*
152484b82766Smg 			 * Not contiguous, so reduce the read-ahead counters.
152584b82766Smg */
152684b82766Smg if (hp->hs_ra_bytes > 0)
152784b82766Smg hp->hs_ra_bytes -= PAGESIZE;
152884b82766Smg
152984b82766Smg if (hp->hs_ra_bytes <= 0) {
153084b82766Smg hp->hs_ra_bytes = 0;
153184b82766Smg if (hp->hs_num_contig > 0)
153284b82766Smg hp->hs_num_contig--;
153384b82766Smg }
153484b82766Smg }
153584b82766Smg /*
153684b82766Smg 		 * Length must be rounded up to a page boundary,
153784b82766Smg 		 * since we read in units of pages.
153884b82766Smg */
153984b82766Smg hp->hs_prev_offset = off + roundup(len, PAGESIZE);
154084b82766Smg DTRACE_PROBE1(hsfs_compute_ra, struct hsnode *, hp);
154184b82766Smg }
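	/*
	 * Example of the ramp behaviour above (symbolic, no new
	 * tunables assumed): the first (seq_contig_requests - 1)
	 * sequential getpage calls only bump hs_num_contig; after
	 * that, hs_ra_bytes grows by PAGESIZE per sequential call
	 * until it reaches hqueue->max_ra_bytes, and each
	 * non-sequential access walks it back down by PAGESIZE.
	 */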
15427c478bd9Sstevel@tonic-gate if (protp != NULL)
15437c478bd9Sstevel@tonic-gate *protp = PROT_ALL;
15447c478bd9Sstevel@tonic-gate
154506e6833aSJosef 'Jeff' Sipek return (pvn_getpages(hsfs_getapage, vp, off, len, protp, pl, plsz,
154606e6833aSJosef 'Jeff' Sipek seg, addr, rw, cred));
15477c478bd9Sstevel@tonic-gate }
15487c478bd9Sstevel@tonic-gate
15497c478bd9Sstevel@tonic-gate
15507c478bd9Sstevel@tonic-gate
15517c478bd9Sstevel@tonic-gate /*
15527c478bd9Sstevel@tonic-gate * This function should never be called. We need to have it to pass
15537c478bd9Sstevel@tonic-gate * it as an argument to other functions.
15547c478bd9Sstevel@tonic-gate */
15557c478bd9Sstevel@tonic-gate /*ARGSUSED*/
15567c478bd9Sstevel@tonic-gate int
1557*ade42b55SSebastien Roy hsfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1558*ade42b55SSebastien Roy int flags, cred_t *cr)
15597c478bd9Sstevel@tonic-gate {
15607c478bd9Sstevel@tonic-gate /* should never happen - just destroy it */
15617c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
15627c478bd9Sstevel@tonic-gate pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
15637c478bd9Sstevel@tonic-gate return (0);
15647c478bd9Sstevel@tonic-gate }
15657c478bd9Sstevel@tonic-gate
15667c478bd9Sstevel@tonic-gate
15677c478bd9Sstevel@tonic-gate /*
15687c478bd9Sstevel@tonic-gate * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
15697c478bd9Sstevel@tonic-gate * B_INVAL is set by:
15707c478bd9Sstevel@tonic-gate *
15717c478bd9Sstevel@tonic-gate * 1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
15727c478bd9Sstevel@tonic-gate * 2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
15737c478bd9Sstevel@tonic-gate * which translates to an MC_SYNC with the MS_INVALIDATE flag.
15747c478bd9Sstevel@tonic-gate *
15757c478bd9Sstevel@tonic-gate * The B_FREE (as well as the B_DONTNEED) flag is set when the
15767c478bd9Sstevel@tonic-gate * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
15777c478bd9Sstevel@tonic-gate * from SEGVN to release pages behind a pagefault.
15787c478bd9Sstevel@tonic-gate */
15797c478bd9Sstevel@tonic-gate /*ARGSUSED*/
15807c478bd9Sstevel@tonic-gate static int
1581*ade42b55SSebastien Roy hsfs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
1582*ade42b55SSebastien Roy struct cred *cr, caller_context_t *ct)
15837c478bd9Sstevel@tonic-gate {
15847c478bd9Sstevel@tonic-gate int error = 0;
15857c478bd9Sstevel@tonic-gate
15867c478bd9Sstevel@tonic-gate if (vp->v_count == 0) {
15877c478bd9Sstevel@tonic-gate panic("hsfs_putpage: bad v_count");
15887c478bd9Sstevel@tonic-gate /*NOTREACHED*/
15897c478bd9Sstevel@tonic-gate }
15907c478bd9Sstevel@tonic-gate
15917c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
15927c478bd9Sstevel@tonic-gate return (ENOSYS);
15937c478bd9Sstevel@tonic-gate
15949cbc422eSpeterte ASSERT(off <= HS_MAXFILEOFF);
15957c478bd9Sstevel@tonic-gate
15967c478bd9Sstevel@tonic-gate if (!vn_has_cached_data(vp)) /* no pages mapped */
15977c478bd9Sstevel@tonic-gate return (0);
15987c478bd9Sstevel@tonic-gate
1599d10b6702Sfrankho if (len == 0) { /* from 'off' to EOF */
1600d10b6702Sfrankho error = pvn_vplist_dirty(vp, off, hsfs_putapage, flags, cr);
1601d10b6702Sfrankho } else {
16027c478bd9Sstevel@tonic-gate offset_t end_off = off + len;
16037c478bd9Sstevel@tonic-gate offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
16047c478bd9Sstevel@tonic-gate offset_t io_off;
16057c478bd9Sstevel@tonic-gate
16067c478bd9Sstevel@tonic-gate file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
16077c478bd9Sstevel@tonic-gate if (end_off > file_size)
16087c478bd9Sstevel@tonic-gate end_off = file_size;
16097c478bd9Sstevel@tonic-gate
16107c478bd9Sstevel@tonic-gate for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
16117c478bd9Sstevel@tonic-gate page_t *pp;
16127c478bd9Sstevel@tonic-gate
16137c478bd9Sstevel@tonic-gate /*
16147c478bd9Sstevel@tonic-gate * We insist on getting the page only if we are
16157c478bd9Sstevel@tonic-gate * about to invalidate, free or write it and
16167c478bd9Sstevel@tonic-gate * the B_ASYNC flag is not set.
16177c478bd9Sstevel@tonic-gate */
16187c478bd9Sstevel@tonic-gate if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
16197c478bd9Sstevel@tonic-gate pp = page_lookup(vp, io_off,
1620d10b6702Sfrankho (flags & (B_INVAL | B_FREE)) ?
1621d10b6702Sfrankho SE_EXCL : SE_SHARED);
16227c478bd9Sstevel@tonic-gate } else {
16237c478bd9Sstevel@tonic-gate pp = page_lookup_nowait(vp, io_off,
1624d10b6702Sfrankho (flags & B_FREE) ? SE_EXCL : SE_SHARED);
16257c478bd9Sstevel@tonic-gate }
16267c478bd9Sstevel@tonic-gate
16277c478bd9Sstevel@tonic-gate if (pp == NULL)
16287c478bd9Sstevel@tonic-gate continue;
162984b82766Smg
16307c478bd9Sstevel@tonic-gate /*
16317c478bd9Sstevel@tonic-gate * Normally pvn_getdirty() should return 0, which
16327c478bd9Sstevel@tonic-gate 			 * implies that it has done the job for us.
16337c478bd9Sstevel@tonic-gate * The shouldn't-happen scenario is when it returns 1.
16347c478bd9Sstevel@tonic-gate * This means that the page has been modified and
16357c478bd9Sstevel@tonic-gate * needs to be put back.
16367c478bd9Sstevel@tonic-gate * Since we can't write on a CD, we fake a failed
16377c478bd9Sstevel@tonic-gate * I/O and force pvn_write_done() to destroy the page.
16387c478bd9Sstevel@tonic-gate */
16397c478bd9Sstevel@tonic-gate if (pvn_getdirty(pp, flags) == 1) {
16407c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE,
1641d10b6702Sfrankho "hsfs_putpage: dirty HSFS page");
16427c478bd9Sstevel@tonic-gate pvn_write_done(pp, flags |
16437c478bd9Sstevel@tonic-gate B_ERROR | B_WRITE | B_INVAL | B_FORCE);
16447c478bd9Sstevel@tonic-gate }
16457c478bd9Sstevel@tonic-gate }
16467c478bd9Sstevel@tonic-gate }
16477c478bd9Sstevel@tonic-gate return (error);
16487c478bd9Sstevel@tonic-gate }
16497c478bd9Sstevel@tonic-gate
16507c478bd9Sstevel@tonic-gate
16517c478bd9Sstevel@tonic-gate /*ARGSUSED*/
16527c478bd9Sstevel@tonic-gate static int
1653*ade42b55SSebastien Roy hsfs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
1654*ade42b55SSebastien Roy size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cred,
1655*ade42b55SSebastien Roy caller_context_t *ct)
16567c478bd9Sstevel@tonic-gate {
16577c478bd9Sstevel@tonic-gate struct segvn_crargs vn_a;
16587c478bd9Sstevel@tonic-gate int error;
16597c478bd9Sstevel@tonic-gate
16607c478bd9Sstevel@tonic-gate /* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
16617c478bd9Sstevel@tonic-gate
16627c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
16637c478bd9Sstevel@tonic-gate return (ENOSYS);
16647c478bd9Sstevel@tonic-gate
1665277b8dcbSHans Rosenfeld if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
1666de4ddf9cSKeith M Wesolowski return (ENOSYS);
1667de4ddf9cSKeith M Wesolowski
16689cbc422eSpeterte if (off > HS_MAXFILEOFF || off < 0 ||
16699cbc422eSpeterte (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1670cfa55013Speterte return (ENXIO);
16717c478bd9Sstevel@tonic-gate
16727c478bd9Sstevel@tonic-gate if (vp->v_type != VREG) {
16737c478bd9Sstevel@tonic-gate return (ENODEV);
16747c478bd9Sstevel@tonic-gate }
16757c478bd9Sstevel@tonic-gate
16767c478bd9Sstevel@tonic-gate /*
16777c478bd9Sstevel@tonic-gate * If file is being locked, disallow mapping.
16787c478bd9Sstevel@tonic-gate */
16797c478bd9Sstevel@tonic-gate if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
16807c478bd9Sstevel@tonic-gate return (EAGAIN);
16817c478bd9Sstevel@tonic-gate
16827c478bd9Sstevel@tonic-gate as_rangelock(as);
168360946fe0Smec error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
168460946fe0Smec if (error != 0) {
168560946fe0Smec as_rangeunlock(as);
168660946fe0Smec return (error);
16877c478bd9Sstevel@tonic-gate }
16887c478bd9Sstevel@tonic-gate
16897c478bd9Sstevel@tonic-gate vn_a.vp = vp;
16907c478bd9Sstevel@tonic-gate vn_a.offset = off;
16917c478bd9Sstevel@tonic-gate vn_a.type = flags & MAP_TYPE;
16927c478bd9Sstevel@tonic-gate vn_a.prot = prot;
16937c478bd9Sstevel@tonic-gate vn_a.maxprot = maxprot;
16947c478bd9Sstevel@tonic-gate vn_a.flags = flags & ~MAP_TYPE;
16957c478bd9Sstevel@tonic-gate vn_a.cred = cred;
16967c478bd9Sstevel@tonic-gate vn_a.amp = NULL;
16977c478bd9Sstevel@tonic-gate vn_a.szc = 0;
16987c478bd9Sstevel@tonic-gate vn_a.lgrp_mem_policy_flags = 0;
16997c478bd9Sstevel@tonic-gate
17007c478bd9Sstevel@tonic-gate error = as_map(as, *addrp, len, segvn_create, &vn_a);
17017c478bd9Sstevel@tonic-gate as_rangeunlock(as);
17027c478bd9Sstevel@tonic-gate return (error);
17037c478bd9Sstevel@tonic-gate }
17047c478bd9Sstevel@tonic-gate
17057c478bd9Sstevel@tonic-gate /* ARGSUSED */
17067c478bd9Sstevel@tonic-gate static int
1707*ade42b55SSebastien Roy hsfs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1708*ade42b55SSebastien Roy size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
1709*ade42b55SSebastien Roy caller_context_t *ct)
17107c478bd9Sstevel@tonic-gate {
17117c478bd9Sstevel@tonic-gate struct hsnode *hp;
17127c478bd9Sstevel@tonic-gate
17137c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
17147c478bd9Sstevel@tonic-gate return (ENOSYS);
17157c478bd9Sstevel@tonic-gate
17167c478bd9Sstevel@tonic-gate hp = VTOH(vp);
17177c478bd9Sstevel@tonic-gate mutex_enter(&hp->hs_contents_lock);
17187c478bd9Sstevel@tonic-gate hp->hs_mapcnt += btopr(len);
17197c478bd9Sstevel@tonic-gate mutex_exit(&hp->hs_contents_lock);
17207c478bd9Sstevel@tonic-gate return (0);
17217c478bd9Sstevel@tonic-gate }
17227c478bd9Sstevel@tonic-gate
17237c478bd9Sstevel@tonic-gate /*ARGSUSED*/
17247c478bd9Sstevel@tonic-gate static int
1725*ade42b55SSebastien Roy hsfs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1726*ade42b55SSebastien Roy size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cr,
1727*ade42b55SSebastien Roy caller_context_t *ct)
17287c478bd9Sstevel@tonic-gate {
17297c478bd9Sstevel@tonic-gate struct hsnode *hp;
17307c478bd9Sstevel@tonic-gate
17317c478bd9Sstevel@tonic-gate if (vp->v_flag & VNOMAP)
17327c478bd9Sstevel@tonic-gate return (ENOSYS);
17337c478bd9Sstevel@tonic-gate
17347c478bd9Sstevel@tonic-gate hp = VTOH(vp);
17357c478bd9Sstevel@tonic-gate mutex_enter(&hp->hs_contents_lock);
17367c478bd9Sstevel@tonic-gate hp->hs_mapcnt -= btopr(len); /* Count released mappings */
17377c478bd9Sstevel@tonic-gate ASSERT(hp->hs_mapcnt >= 0);
17387c478bd9Sstevel@tonic-gate mutex_exit(&hp->hs_contents_lock);
17397c478bd9Sstevel@tonic-gate return (0);
17407c478bd9Sstevel@tonic-gate }
17417c478bd9Sstevel@tonic-gate
17427c478bd9Sstevel@tonic-gate /* ARGSUSED */
17437c478bd9Sstevel@tonic-gate static int
1744*ade42b55SSebastien Roy hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
1745*ade42b55SSebastien Roy caller_context_t *ct)
17467c478bd9Sstevel@tonic-gate {
17477c478bd9Sstevel@tonic-gate return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
17487c478bd9Sstevel@tonic-gate }
17497c478bd9Sstevel@tonic-gate
17507c478bd9Sstevel@tonic-gate /* ARGSUSED */
17517c478bd9Sstevel@tonic-gate static int
1752*ade42b55SSebastien Roy hsfs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
1753*ade42b55SSebastien Roy offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
1754*ade42b55SSebastien Roy caller_context_t *ct)
17557c478bd9Sstevel@tonic-gate {
17567c478bd9Sstevel@tonic-gate struct hsnode *hp = VTOH(vp);
17577c478bd9Sstevel@tonic-gate
17587c478bd9Sstevel@tonic-gate /*
17597c478bd9Sstevel@tonic-gate * If the file is being mapped, disallow fs_frlock.
17607c478bd9Sstevel@tonic-gate * We are not holding the hs_contents_lock while checking
17617c478bd9Sstevel@tonic-gate * hs_mapcnt because the current locking strategy drops all
17627c478bd9Sstevel@tonic-gate * locks before calling fs_frlock.
17637c478bd9Sstevel@tonic-gate * So, hs_mapcnt could change before we enter fs_frlock making
17647c478bd9Sstevel@tonic-gate * it meaningless to have held hs_contents_lock in the first place.
17657c478bd9Sstevel@tonic-gate */
17667c478bd9Sstevel@tonic-gate if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
17677c478bd9Sstevel@tonic-gate return (EAGAIN);
17687c478bd9Sstevel@tonic-gate
1769da6c28aaSamw return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
17707c478bd9Sstevel@tonic-gate }
17717c478bd9Sstevel@tonic-gate
177284b82766Smg static int
177384b82766Smg hsched_deadline_compare(const void *x1, const void *x2)
177484b82766Smg {
177584b82766Smg const struct hio *h1 = x1;
177684b82766Smg const struct hio *h2 = x2;
177784b82766Smg
177884b82766Smg if (h1->io_timestamp < h2->io_timestamp)
177984b82766Smg return (-1);
178084b82766Smg if (h1->io_timestamp > h2->io_timestamp)
178184b82766Smg return (1);
178284b82766Smg
178384b82766Smg if (h1->io_lblkno < h2->io_lblkno)
178484b82766Smg return (-1);
178584b82766Smg if (h1->io_lblkno > h2->io_lblkno)
178684b82766Smg return (1);
178784b82766Smg
178884b82766Smg if (h1 < h2)
178984b82766Smg return (-1);
179084b82766Smg if (h1 > h2)
179184b82766Smg return (1);
179284b82766Smg
179384b82766Smg return (0);
179484b82766Smg }
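/*
 * Note on the comparator pattern above and below: an AVL tree requires
 * a total order, and two distinct hio structures may share a timestamp
 * and lblkno, so both comparators fall back to comparing the structure
 * addresses themselves as a final tie-breaker.
 */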
179584b82766Smg
179684b82766Smg static int
179784b82766Smg hsched_offset_compare(const void *x1, const void *x2)
179884b82766Smg {
179984b82766Smg const struct hio *h1 = x1;
180084b82766Smg const struct hio *h2 = x2;
180184b82766Smg
180284b82766Smg if (h1->io_lblkno < h2->io_lblkno)
180384b82766Smg return (-1);
180484b82766Smg if (h1->io_lblkno > h2->io_lblkno)
180584b82766Smg return (1);
180684b82766Smg
180784b82766Smg if (h1 < h2)
180884b82766Smg return (-1);
180984b82766Smg if (h1 > h2)
181084b82766Smg return (1);
181184b82766Smg
181284b82766Smg return (0);
181384b82766Smg }
181484b82766Smg
181584b82766Smg void
181684b82766Smg hsched_init_caches(void)
181784b82766Smg {
181884b82766Smg hio_cache = kmem_cache_create("hsfs_hio_cache",
181984b82766Smg sizeof (struct hio), 0, NULL,
182084b82766Smg NULL, NULL, NULL, NULL, 0);
182184b82766Smg
182284b82766Smg hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
182384b82766Smg sizeof (struct hio_info), 0, NULL,
182484b82766Smg NULL, NULL, NULL, NULL, 0);
182584b82766Smg }
182684b82766Smg
182784b82766Smg void
182884b82766Smg hsched_fini_caches(void)
182984b82766Smg {
183084b82766Smg kmem_cache_destroy(hio_cache);
183184b82766Smg kmem_cache_destroy(hio_info_cache);
183284b82766Smg }
183384b82766Smg
183484b82766Smg /*
183584b82766Smg * Initialize I/O scheduling structures. This is called via hsfs_mount
183684b82766Smg */
183784b82766Smg void
183884b82766Smg hsched_init(struct hsfs *fsp, int fsid, struct modlinkage *modlinkage)
183984b82766Smg {
184084b82766Smg struct hsfs_queue *hqueue = fsp->hqueue;
184184b82766Smg struct vnode *vp = fsp->hsfs_devvp;
184284b82766Smg
184384b82766Smg /* TaskQ name of the form: hsched_task_ + stringof(int) */
184484b82766Smg char namebuf[23];
184584b82766Smg int error, err;
184684b82766Smg struct dk_cinfo info;
184784b82766Smg ldi_handle_t lh;
184884b82766Smg ldi_ident_t li;
184984b82766Smg
185084b82766Smg /*
185184b82766Smg * Default maxtransfer = 16k chunk
185284b82766Smg */
185384b82766Smg hqueue->dev_maxtransfer = 16384;
185484b82766Smg
185584b82766Smg /*
185684b82766Smg * Try to fetch the maximum device transfer size. This is used to
185784b82766Smg * ensure that a coalesced block does not exceed the maxtransfer.
185884b82766Smg */
185984b82766Smg err = ldi_ident_from_mod(modlinkage, &li);
186084b82766Smg if (err) {
186184b82766Smg cmn_err(CE_NOTE, "hsched_init: Querying device failed");
186284b82766Smg cmn_err(CE_NOTE, "hsched_init: ldi_ident_from_mod err=%d\n",
186384b82766Smg err);
186484b82766Smg goto set_ra;
186584b82766Smg }
186684b82766Smg
186784b82766Smg err = ldi_open_by_dev(&(vp->v_rdev), OTYP_CHR, FREAD, CRED(), &lh, li);
186884b82766Smg ldi_ident_release(li);
186984b82766Smg if (err) {
187084b82766Smg cmn_err(CE_NOTE, "hsched_init: Querying device failed");
187184b82766Smg cmn_err(CE_NOTE, "hsched_init: ldi_open err=%d\n", err);
187284b82766Smg goto set_ra;
187384b82766Smg }
187484b82766Smg
187584b82766Smg error = ldi_ioctl(lh, DKIOCINFO, (intptr_t)&info, FKIOCTL,
187684b82766Smg CRED(), &err);
187784b82766Smg err = ldi_close(lh, FREAD, CRED());
187884b82766Smg if (err) {
187984b82766Smg cmn_err(CE_NOTE, "hsched_init: Querying device failed");
188084b82766Smg cmn_err(CE_NOTE, "hsched_init: ldi_close err=%d\n", err);
188184b82766Smg }
188284b82766Smg
188384b82766Smg if (error == 0) {
188484b82766Smg hqueue->dev_maxtransfer = ldbtob(info.dki_maxtransfer);
188584b82766Smg }
188684b82766Smg
188784b82766Smg set_ra:
188884b82766Smg /*
188984b82766Smg * Max size of data to read ahead for sequential access pattern.
189084b82766Smg 	 * Conservative, to avoid letting the underlying CD drive spin
189184b82766Smg 	 * down in case the application is reading slowly.
189284b82766Smg 	 * We read ahead up to a max of 8 pages.
189384b82766Smg */
189484b82766Smg hqueue->max_ra_bytes = PAGESIZE * 8;
189584b82766Smg
189684b82766Smg mutex_init(&(hqueue->hsfs_queue_lock), NULL, MUTEX_DEFAULT, NULL);
189784b82766Smg mutex_init(&(hqueue->strategy_lock), NULL, MUTEX_DEFAULT, NULL);
189884b82766Smg avl_create(&(hqueue->read_tree), hsched_offset_compare,
189984b82766Smg sizeof (struct hio), offsetof(struct hio, io_offset_node));
190084b82766Smg avl_create(&(hqueue->deadline_tree), hsched_deadline_compare,
190184b82766Smg sizeof (struct hio), offsetof(struct hio, io_deadline_node));
190284b82766Smg
190384b82766Smg (void) snprintf(namebuf, sizeof (namebuf), "hsched_task_%d", fsid);
190484b82766Smg hqueue->ra_task = taskq_create(namebuf, hsfs_taskq_nthreads,
190584b82766Smg minclsyspri + 2, 1, 104857600 / PAGESIZE, TASKQ_DYNAMIC);
190684b82766Smg
190784b82766Smg hqueue->next = NULL;
190884b82766Smg hqueue->nbuf = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
190984b82766Smg }
191084b82766Smg
191184b82766Smg void
191284b82766Smg hsched_fini(struct hsfs_queue *hqueue)
191384b82766Smg {
191484b82766Smg if (hqueue != NULL) {
1915f9ec9c5aSmg /*
1916f9ec9c5aSmg * Remove the sentinel if there was one.
1917f9ec9c5aSmg */
1918f9ec9c5aSmg if (hqueue->next != NULL) {
1919f9ec9c5aSmg avl_remove(&hqueue->read_tree, hqueue->next);
1920f9ec9c5aSmg kmem_cache_free(hio_cache, hqueue->next);
1921f9ec9c5aSmg }
192284b82766Smg avl_destroy(&(hqueue->read_tree));
192384b82766Smg avl_destroy(&(hqueue->deadline_tree));
192484b82766Smg mutex_destroy(&(hqueue->hsfs_queue_lock));
192584b82766Smg mutex_destroy(&(hqueue->strategy_lock));
192684b82766Smg
192784b82766Smg /*
192884b82766Smg * If there are any existing readahead threads running
192984b82766Smg * taskq_destroy will wait for them to finish.
193084b82766Smg */
193184b82766Smg taskq_destroy(hqueue->ra_task);
193284b82766Smg kmem_free(hqueue->nbuf, sizeof (struct buf));
193384b82766Smg }
193484b82766Smg }
193584b82766Smg
193684b82766Smg /*
193784b82766Smg * Determine if two I/O requests are adjacent to each other so
193884b82766Smg  * that they can be coalesced.
193984b82766Smg */
194084b82766Smg #define IS_ADJACENT(io, nio) \
194184b82766Smg (((io)->io_lblkno + (io)->nblocks == (nio)->io_lblkno) && \
194284b82766Smg (io)->bp->b_edev == (nio)->bp->b_edev)
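/*
 * For example (made-up values): an hio covering io_lblkno 100 with
 * nblocks = 8 is adjacent to one starting at io_lblkno 108 on the same
 * device, so the two can be chained and issued as a single request.
 */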
194384b82766Smg
194484b82766Smg /*
194584b82766Smg * This performs the actual I/O scheduling logic. We use the Circular
194684b82766Smg * Look algorithm here. Sort the I/O requests in ascending order of
194784b82766Smg * logical block number and process them starting with the lowest
194884b82766Smg * numbered block and progressing towards higher block numbers in the
194984b82766Smg * queue. Once there are no more higher numbered blocks, start again
195084b82766Smg * with the lowest one. This is good for CD/DVD as you keep moving
195184b82766Smg * the head in one direction along the outward spiral track and avoid
195284b82766Smg  * seeks as much as possible. The re-ordering also allows
195384b82766Smg * us to coalesce adjacent requests into one larger request.
195484b82766Smg * This is thus essentially a 1-way Elevator with front merging.
195584b82766Smg *
195684b82766Smg * In addition each read request here has a deadline and will be
195784b82766Smg * processed out of turn if the deadline (500ms) expires.
195884b82766Smg *
195984b82766Smg * This function is necessarily serialized via hqueue->strategy_lock.
196084b82766Smg * This function sits just below hsfs_getapage and processes all read
196184b82766Smg  * requests originating from that function.
196284b82766Smg */
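/*
 * Illustrative C-LOOK pass over a hypothetical queue: with pending
 * reads at blocks 10, 40 and 90 and the sentinel left at block 40 by
 * the previous pass, the next invocation picks 90; once no
 * higher-numbered request remains, the scan wraps back to 10. A
 * request whose 500ms deadline has expired is serviced first instead.
 */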
196384b82766Smg int
196484b82766Smg hsched_invoke_strategy(struct hsfs *fsp)
196584b82766Smg {
196684b82766Smg struct hsfs_queue *hqueue;
196784b82766Smg struct buf *nbuf;
196884b82766Smg struct hio *fio, *nio, *tio, *prev, *last;
196984b82766Smg size_t bsize, soffset, offset, data;
197084b82766Smg int bioret, bufcount;
197184b82766Smg struct vnode *fvp;
197284b82766Smg ksema_t *io_done;
197384b82766Smg caddr_t iodata;
197484b82766Smg
197584b82766Smg hqueue = fsp->hqueue;
197684b82766Smg mutex_enter(&hqueue->strategy_lock);
197784b82766Smg mutex_enter(&hqueue->hsfs_queue_lock);
197884b82766Smg
197984b82766Smg /*
198084b82766Smg * Check for Deadline expiration first
198184b82766Smg */
198284b82766Smg fio = avl_first(&hqueue->deadline_tree);
198384b82766Smg
198484b82766Smg /*
198584b82766Smg * Paranoid check for empty I/O queue. Both deadline
198684b82766Smg 	 * and read trees contain the same data sorted in different
198784b82766Smg * ways. So empty deadline tree = empty read tree.
198884b82766Smg */
198984b82766Smg if (fio == NULL) {
199084b82766Smg /*
199184b82766Smg * Remove the sentinel if there was one.
199284b82766Smg */
199384b82766Smg if (hqueue->next != NULL) {
199484b82766Smg avl_remove(&hqueue->read_tree, hqueue->next);
199584b82766Smg kmem_cache_free(hio_cache, hqueue->next);
199684b82766Smg hqueue->next = NULL;
199784b82766Smg }
199884b82766Smg mutex_exit(&hqueue->hsfs_queue_lock);
199984b82766Smg mutex_exit(&hqueue->strategy_lock);
200084b82766Smg return (1);
200184b82766Smg }
200284b82766Smg
200384b82766Smg if (drv_hztousec(ddi_get_lbolt()) - fio->io_timestamp
200484b82766Smg < HSFS_READ_DEADLINE) {
200584b82766Smg /*
200684b82766Smg * Apply standard scheduling logic. This uses the
200784b82766Smg * C-LOOK approach. Process I/O requests in ascending
200884b82766Smg * order of logical block address till no subsequent
200984b82766Smg * higher numbered block request remains. Then start
201084b82766Smg * again from the lowest numbered block in the queue.
201184b82766Smg *
201284b82766Smg * We do this cheaply here by means of a sentinel.
201384b82766Smg * The last processed I/O structure from the previous
201484b82766Smg 		 * invocation of this func is left dangling in the
201584b82766Smg * read_tree so that we can easily scan to the next
201684b82766Smg * higher numbered request and remove the sentinel.
201784b82766Smg */
201884b82766Smg fio = NULL;
201984b82766Smg if (hqueue->next != NULL) {
202084b82766Smg fio = AVL_NEXT(&hqueue->read_tree, hqueue->next);
202184b82766Smg avl_remove(&hqueue->read_tree, hqueue->next);
202284b82766Smg kmem_cache_free(hio_cache, hqueue->next);
202384b82766Smg hqueue->next = NULL;
202484b82766Smg }
202584b82766Smg if (fio == NULL) {
202684b82766Smg fio = avl_first(&hqueue->read_tree);
202784b82766Smg }
202884b82766Smg } else if (hqueue->next != NULL) {
202984b82766Smg DTRACE_PROBE1(hsfs_deadline_expiry, struct hio *, fio);
203084b82766Smg
203184b82766Smg avl_remove(&hqueue->read_tree, hqueue->next);
203284b82766Smg kmem_cache_free(hio_cache, hqueue->next);
203384b82766Smg hqueue->next = NULL;
203484b82766Smg }
203584b82766Smg
203684b82766Smg /*
203784b82766Smg * In addition we try to coalesce contiguous
203884b82766Smg * requests into one bigger request.
203984b82766Smg */
204084b82766Smg bufcount = 1;
204184b82766Smg bsize = ldbtob(fio->nblocks);
204284b82766Smg fvp = fio->bp->b_file;
204384b82766Smg nio = AVL_NEXT(&hqueue->read_tree, fio);
204484b82766Smg tio = fio;
204584b82766Smg while (nio != NULL && IS_ADJACENT(tio, nio) &&
204684b82766Smg bsize < hqueue->dev_maxtransfer) {
204784b82766Smg avl_remove(&hqueue->deadline_tree, tio);
204884b82766Smg avl_remove(&hqueue->read_tree, tio);
204984b82766Smg tio->contig_chain = nio;
205084b82766Smg bsize += ldbtob(nio->nblocks);
205184b82766Smg prev = tio;
205284b82766Smg tio = nio;
205384b82766Smg
205484b82766Smg /*
205584b82766Smg * This check is required to detect the case where
205684b82766Smg * we are merging adjacent buffers belonging to
205784b82766Smg * different files. fvp is used to set the b_file
205884b82766Smg * parameter in the coalesced buf. b_file is used
205984b82766Smg 		 * by DTrace, so we do not want DTrace to attribute
206084b82766Smg 		 * requests for two different files to any one file.
206184b82766Smg */
206284b82766Smg if (fvp && tio->bp->b_file != fvp) {
206384b82766Smg fvp = NULL;
206484b82766Smg }
206584b82766Smg
206684b82766Smg nio = AVL_NEXT(&hqueue->read_tree, nio);
206784b82766Smg bufcount++;
206884b82766Smg }
206984b82766Smg
207084b82766Smg /*
207184b82766Smg * tio is not removed from the read_tree as it serves as a sentinel
207284b82766Smg * to cheaply allow us to scan to the next higher numbered I/O
207384b82766Smg * request.
207484b82766Smg */
207584b82766Smg hqueue->next = tio;
207684b82766Smg avl_remove(&hqueue->deadline_tree, tio);
207784b82766Smg mutex_exit(&hqueue->hsfs_queue_lock);
207884b82766Smg DTRACE_PROBE3(hsfs_io_dequeued, struct hio *, fio, int, bufcount,
207984b82766Smg size_t, bsize);
208084b82766Smg
208184b82766Smg /*
208284b82766Smg 	 * The benefit of coalescing occurs if the savings in I/O outweigh
208384b82766Smg * the cost of doing the additional work below.
208484b82766Smg * It was observed that coalescing 2 buffers results in diminishing
208584b82766Smg * returns, so we do coalescing if we have >2 adjacent bufs.
208684b82766Smg */
208784b82766Smg if (bufcount > hsched_coalesce_min) {
208884b82766Smg /*
208984b82766Smg * We have coalesced blocks. First allocate mem and buf for
209084b82766Smg * the entire coalesced chunk.
209184b82766Smg * Since we are guaranteed single-threaded here we pre-allocate
209284b82766Smg * one buf at mount time and that is re-used every time. This
209384b82766Smg 		 * is a synthesized buf structure that uses a kmem_alloc'ed
209484b82766Smg 		 * chunk; it is not quite a normal buf attached to pages.
209584b82766Smg */
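		/*
		 * Hypothetical instance of this path: three chained 8K
		 * bufs at consecutive lblknos become one synthesized
		 * 24K buf reading into a temporary kmem chunk, from
		 * which each original buf's data is copied back after
		 * the single biowait() below.
		 */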
209684b82766Smg fsp->coalesced_bytes += bsize;
209784b82766Smg nbuf = hqueue->nbuf;
209884b82766Smg bioinit(nbuf);
209984b82766Smg nbuf->b_edev = fio->bp->b_edev;
210084b82766Smg nbuf->b_dev = fio->bp->b_dev;
210184b82766Smg nbuf->b_flags = fio->bp->b_flags;
210284b82766Smg nbuf->b_iodone = fio->bp->b_iodone;
210384b82766Smg iodata = kmem_alloc(bsize, KM_SLEEP);
210484b82766Smg nbuf->b_un.b_addr = iodata;
210584b82766Smg nbuf->b_lblkno = fio->bp->b_lblkno;
210684b82766Smg nbuf->b_vp = fvp;
210784b82766Smg nbuf->b_file = fvp;
210884b82766Smg nbuf->b_bcount = bsize;
210984b82766Smg nbuf->b_bufsize = bsize;
211084b82766Smg
211184b82766Smg DTRACE_PROBE3(hsfs_coalesced_io_start, struct hio *, fio, int,
211284b82766Smg bufcount, size_t, bsize);
211384b82766Smg
211484b82766Smg /*
211584b82766Smg * Perform I/O for the coalesced block.
211684b82766Smg */
211784b82766Smg (void) bdev_strategy(nbuf);
211884b82766Smg
211984b82766Smg /*
212084b82766Smg * Duplicate the last IO node to leave the sentinel alone.
212184b82766Smg * The sentinel is freed in the next invocation of this
212284b82766Smg * function.
212384b82766Smg */
212484b82766Smg prev->contig_chain = kmem_cache_alloc(hio_cache, KM_SLEEP);
212584b82766Smg prev->contig_chain->bp = tio->bp;
212684b82766Smg prev->contig_chain->sema = tio->sema;
212784b82766Smg tio = prev->contig_chain;
212884b82766Smg tio->contig_chain = NULL;
212984b82766Smg soffset = ldbtob(fio->bp->b_lblkno);
213084b82766Smg nio = fio;
213184b82766Smg
213284b82766Smg bioret = biowait(nbuf);
213384b82766Smg data = bsize - nbuf->b_resid;
213484b82766Smg biofini(nbuf);
213584b82766Smg mutex_exit(&hqueue->strategy_lock);
213684b82766Smg
213784b82766Smg /*
213884b82766Smg * We use the b_resid parameter to detect how much
213984b82766Smg 		 * data was successfully transferred. We signal
214084b82766Smg 		 * success to all the original (pre-coalescing) bufs
214184b82766Smg 		 * that were fully retrieved; the rest, if any, are
214284b82766Smg 		 * signaled as errors.
214384b82766Smg */
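		/*
		 * E.g. (hypothetical): if 20K of a 24K coalesced read
		 * completed, the first two 8K bufs are biodone()'d as
		 * successes below, and the third gets b_resid = 4K
		 * plus the error code from the coalesced transfer.
		 */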
214484b82766Smg tio = nio;
214584b82766Smg DTRACE_PROBE3(hsfs_coalesced_io_done, struct hio *, nio,
214684b82766Smg int, bioret, size_t, data);
214784b82766Smg
214884b82766Smg /*
214984b82766Smg * Copy data and signal success to all the bufs
215084b82766Smg * which can be fully satisfied from b_resid.
215184b82766Smg */
215284b82766Smg while (nio != NULL && data >= nio->bp->b_bcount) {
215384b82766Smg offset = ldbtob(nio->bp->b_lblkno) - soffset;
215484b82766Smg bcopy(iodata + offset, nio->bp->b_un.b_addr,
215584b82766Smg nio->bp->b_bcount);
215684b82766Smg data -= nio->bp->b_bcount;
215784b82766Smg bioerror(nio->bp, 0);
215884b82766Smg biodone(nio->bp);
215984b82766Smg sema_v(nio->sema);
216084b82766Smg tio = nio;
216184b82766Smg nio = nio->contig_chain;
216284b82766Smg kmem_cache_free(hio_cache, tio);
216384b82766Smg }
216484b82766Smg
216584b82766Smg /*
216684b82766Smg * Signal error to all the leftover bufs (if any)
216784b82766Smg * after b_resid data is exhausted.
216884b82766Smg */
216984b82766Smg while (nio != NULL) {
217084b82766Smg nio->bp->b_resid = nio->bp->b_bcount - data;
217184b82766Smg bzero(nio->bp->b_un.b_addr + data, nio->bp->b_resid);
217284b82766Smg bioerror(nio->bp, bioret);
217384b82766Smg biodone(nio->bp);
217484b82766Smg sema_v(nio->sema);
217584b82766Smg tio = nio;
217684b82766Smg nio = nio->contig_chain;
217784b82766Smg kmem_cache_free(hio_cache, tio);
217884b82766Smg data = 0;
217984b82766Smg }
218084b82766Smg kmem_free(iodata, bsize);
218184b82766Smg } else {
218284b82766Smg
218384b82766Smg nbuf = tio->bp;
218484b82766Smg io_done = tio->sema;
218584b82766Smg nio = fio;
218684b82766Smg last = tio;
218784b82766Smg
218884b82766Smg while (nio != NULL) {
218984b82766Smg (void) bdev_strategy(nio->bp);
219084b82766Smg nio = nio->contig_chain;
219184b82766Smg }
219284b82766Smg nio = fio;
219384b82766Smg mutex_exit(&hqueue->strategy_lock);
219484b82766Smg
219584b82766Smg while (nio != NULL) {
219684b82766Smg if (nio == last) {
219784b82766Smg (void) biowait(nbuf);
219884b82766Smg sema_v(io_done);
219984b82766Smg break;
220084b82766Smg /* sentinel last not freed. See above. */
220184b82766Smg } else {
220284b82766Smg (void) biowait(nio->bp);
220384b82766Smg sema_v(nio->sema);
220484b82766Smg }
220584b82766Smg tio = nio;
220684b82766Smg nio = nio->contig_chain;
220784b82766Smg kmem_cache_free(hio_cache, tio);
220884b82766Smg }
220984b82766Smg }
221084b82766Smg return (0);
221184b82766Smg }
221284b82766Smg
221384b82766Smg /*
221484b82766Smg * Insert an I/O request in the I/O scheduler's pipeline
221584b82766Smg * Using AVL tree makes it easy to reorder the I/O request
221684b82766Smg * based on logical block number.
221784b82766Smg */
221884b82766Smg static void
221984b82766Smg hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra)
222084b82766Smg {
222184b82766Smg struct hsfs_queue *hqueue = fsp->hqueue;
222284b82766Smg
222384b82766Smg mutex_enter(&hqueue->hsfs_queue_lock);
222484b82766Smg
222584b82766Smg fsp->physical_read_bytes += hsio->bp->b_bcount;
222684b82766Smg if (ra)
222784b82766Smg fsp->readahead_bytes += hsio->bp->b_bcount;
222884b82766Smg
222984b82766Smg avl_add(&hqueue->deadline_tree, hsio);
223084b82766Smg avl_add(&hqueue->read_tree, hsio);
223184b82766Smg
223284b82766Smg DTRACE_PROBE3(hsfs_io_enqueued, struct hio *, hsio,
223384b82766Smg struct hsfs_queue *, hqueue, int, ra);
223484b82766Smg
223584b82766Smg mutex_exit(&hqueue->hsfs_queue_lock);
223684b82766Smg }
223784b82766Smg
2238fc1c62b8Sfrankho /* ARGSUSED */
2239fc1c62b8Sfrankho static int
2240*ade42b55SSebastien Roy hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
2241*ade42b55SSebastien Roy caller_context_t *ct)
2242fc1c62b8Sfrankho {
2243fc1c62b8Sfrankho struct hsfs *fsp;
2244fc1c62b8Sfrankho
2245fc1c62b8Sfrankho int error = 0;
2246fc1c62b8Sfrankho
2247fc1c62b8Sfrankho switch (cmd) {
2248fc1c62b8Sfrankho
2249fc1c62b8Sfrankho case _PC_NAME_MAX:
2250fc1c62b8Sfrankho fsp = VFS_TO_HSFS(vp->v_vfsp);
2251fc1c62b8Sfrankho *valp = fsp->hsfs_namemax;
2252fc1c62b8Sfrankho break;
2253fc1c62b8Sfrankho
2254fc1c62b8Sfrankho case _PC_FILESIZEBITS:
2255fc1c62b8Sfrankho *valp = 33; /* Without multi extent support: 4 GB - 2k */
2256fc1c62b8Sfrankho break;
2257fc1c62b8Sfrankho
22583b862e9aSRoger A. Faulkner case _PC_TIMESTAMP_RESOLUTION:
22593b862e9aSRoger A. Faulkner /*
22603b862e9aSRoger A. Faulkner * HSFS keeps, at best, 1/100 second timestamp resolution.
22613b862e9aSRoger A. Faulkner */
22623b862e9aSRoger A. Faulkner *valp = 10000000L;
22633b862e9aSRoger A. Faulkner break;
22643b862e9aSRoger A. Faulkner
2265fc1c62b8Sfrankho default:
2266da6c28aaSamw error = fs_pathconf(vp, cmd, valp, cr, ct);
22673b862e9aSRoger A. Faulkner break;
2268fc1c62b8Sfrankho }
2269fc1c62b8Sfrankho
2270fc1c62b8Sfrankho return (error);
2271fc1c62b8Sfrankho }
2272fc1c62b8Sfrankho
2273fc1c62b8Sfrankho
2274fc1c62b8Sfrankho
22757c478bd9Sstevel@tonic-gate const fs_operation_def_t hsfs_vnodeops_template[] = {
2276aa59c4cbSrsb VOPNAME_OPEN, { .vop_open = hsfs_open },
2277aa59c4cbSrsb VOPNAME_CLOSE, { .vop_close = hsfs_close },
2278aa59c4cbSrsb VOPNAME_READ, { .vop_read = hsfs_read },
2279aa59c4cbSrsb VOPNAME_GETATTR, { .vop_getattr = hsfs_getattr },
2280aa59c4cbSrsb VOPNAME_ACCESS, { .vop_access = hsfs_access },
2281aa59c4cbSrsb VOPNAME_LOOKUP, { .vop_lookup = hsfs_lookup },
2282aa59c4cbSrsb VOPNAME_READDIR, { .vop_readdir = hsfs_readdir },
2283aa59c4cbSrsb VOPNAME_READLINK, { .vop_readlink = hsfs_readlink },
2284aa59c4cbSrsb VOPNAME_FSYNC, { .vop_fsync = hsfs_fsync },
2285aa59c4cbSrsb VOPNAME_INACTIVE, { .vop_inactive = hsfs_inactive },
2286aa59c4cbSrsb VOPNAME_FID, { .vop_fid = hsfs_fid },
2287aa59c4cbSrsb VOPNAME_SEEK, { .vop_seek = hsfs_seek },
2288aa59c4cbSrsb VOPNAME_FRLOCK, { .vop_frlock = hsfs_frlock },
2289aa59c4cbSrsb VOPNAME_GETPAGE, { .vop_getpage = hsfs_getpage },
2290aa59c4cbSrsb VOPNAME_PUTPAGE, { .vop_putpage = hsfs_putpage },
2291aa59c4cbSrsb VOPNAME_MAP, { .vop_map = hsfs_map },
2292aa59c4cbSrsb VOPNAME_ADDMAP, { .vop_addmap = hsfs_addmap },
2293aa59c4cbSrsb VOPNAME_DELMAP, { .vop_delmap = hsfs_delmap },
2294aa59c4cbSrsb VOPNAME_PATHCONF, { .vop_pathconf = hsfs_pathconf },
2295aa59c4cbSrsb NULL, NULL
22967c478bd9Sstevel@tonic-gate };
22977c478bd9Sstevel@tonic-gate
22987c478bd9Sstevel@tonic-gate struct vnodeops *hsfs_vnodeops;
2299