xref: /illumos-gate/usr/src/uts/common/fs/hsfs/hsfs_vnops.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Vnode operations for the High Sierra filesystem
31  */
32 
33 #include <sys/types.h>
34 #include <sys/t_lock.h>
35 #include <sys/param.h>
36 #include <sys/time.h>
37 #include <sys/systm.h>
38 #include <sys/sysmacros.h>
39 #include <sys/resource.h>
40 #include <sys/signal.h>
41 #include <sys/cred.h>
42 #include <sys/user.h>
43 #include <sys/buf.h>
44 #include <sys/vfs.h>
45 #include <sys/stat.h>
46 #include <sys/vnode.h>
47 #include <sys/mode.h>
48 #include <sys/proc.h>
49 #include <sys/disp.h>
50 #include <sys/file.h>
51 #include <sys/fcntl.h>
52 #include <sys/flock.h>
53 #include <sys/kmem.h>
54 #include <sys/uio.h>
55 #include <sys/conf.h>
56 #include <sys/errno.h>
57 #include <sys/mman.h>
58 #include <sys/pathname.h>
59 #include <sys/debug.h>
60 #include <sys/vmsystm.h>
61 #include <sys/cmn_err.h>
62 #include <sys/fbuf.h>
63 #include <sys/dirent.h>
64 #include <sys/errno.h>
65 
66 #include <vm/hat.h>
67 #include <vm/page.h>
68 #include <vm/pvn.h>
69 #include <vm/as.h>
70 #include <vm/seg.h>
71 #include <vm/seg_map.h>
72 #include <vm/seg_kmem.h>
73 #include <vm/seg_vn.h>
74 #include <vm/rm.h>
75 #include <vm/page.h>
76 #include <sys/swap.h>
77 
78 #include <sys/fs/hsfs_spec.h>
79 #include <sys/fs/hsfs_node.h>
80 #include <sys/fs/hsfs_impl.h>
81 #include <sys/fs/hsfs_susp.h>
82 #include <sys/fs/hsfs_rrip.h>
83 
84 #include <fs/fs_subr.h>
85 
86 /* ARGSUSED */
87 static int
88 hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
89 {
90 	return (0);
91 }
92 
93 
94 /*ARGSUSED*/
95 static int
96 hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
97 	struct caller_context *ct)
98 {
99 	struct hsnode *hp;
100 	ulong_t off;
101 	long mapon, on;
102 	caddr_t base;
103 	uint_t	filesize;
104 	long nbytes, n;
105 	uint_t flags;
106 	int error;
107 
108 	hp = VTOH(vp);
109 	/*
110 	 * if vp is of type VDIR, make sure dirent
111 	 * is filled up with all info (because of ptbl)
112 	 */
113 	if (vp->v_type == VDIR) {
114 		if (hp->hs_dirent.ext_size == 0)
115 			hs_filldirent(vp, &hp->hs_dirent);
116 	}
117 	filesize = hp->hs_dirent.ext_size;
118 
119 	if (uiop->uio_loffset >= MAXOFF_T) {
120 		error = 0;
121 		goto out;
122 	}
123 
124 	if (uiop->uio_offset >= filesize) {
125 		error = 0;
126 		goto out;
127 	}
128 
129 	do {
130 		/* map file to correct page boundary */
131 		off = uiop->uio_offset & MAXBMASK;
132 		mapon = uiop->uio_offset & MAXBOFFSET;
133 
134 		/* set read in data size */
135 		on = (uiop->uio_offset) & PAGEOFFSET;
136 		nbytes = MIN(PAGESIZE - on, uiop->uio_resid);
137 		/* adjust down if > EOF */
138 		n = MIN((filesize - uiop->uio_offset), nbytes);
139 		if (n == 0) {
140 			error = 0;
141 			goto out;
142 		}
143 
144 		/* map the file into memory */
145 		base = segmap_getmapflt(segkmap, vp, (u_offset_t)off,
146 					MAXBSIZE, 1, S_READ);
147 
148 		error = uiomove(base+mapon, (size_t)n, UIO_READ, uiop);
149 		if (error == 0) {
150 			/*
151 			 * if read a whole block, or read to eof,
152 			 *  won't need this buffer again soon.
153 			 */
154 			if (n + on == PAGESIZE ||
155 			    uiop->uio_offset == filesize)
156 				flags = SM_DONTNEED;
157 			else
158 				flags = 0;
159 			error = segmap_release(segkmap, base, flags);
160 		} else
161 			(void) segmap_release(segkmap, base, 0);
162 
163 	} while (error == 0 && uiop->uio_resid > 0);
164 
165 out:
166 	return (error);
167 }
168 
169 /*ARGSUSED2*/
170 static int
171 hsfs_getattr(
172 	struct vnode *vp,
173 	struct vattr *vap,
174 	int flags,
175 	struct cred *cred)
176 {
177 	struct hsnode *hp;
178 	struct vfs *vfsp;
179 	struct hsfs *fsp;
180 
181 	hp = VTOH(vp);
182 	fsp = VFS_TO_HSFS(vp->v_vfsp);
183 	vfsp = vp->v_vfsp;
184 
185 	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
186 		hs_filldirent(vp, &hp->hs_dirent);
187 	}
188 	vap->va_type = IFTOVT(hp->hs_dirent.mode);
189 	vap->va_mode = hp->hs_dirent.mode;
190 	vap->va_uid = hp->hs_dirent.uid;
191 	vap->va_gid = hp->hs_dirent.gid;
192 
193 	vap->va_fsid = vfsp->vfs_dev;
194 	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
195 	vap->va_nlink = hp->hs_dirent.nlink;
196 	vap->va_size =	(offset_t)hp->hs_dirent.ext_size;
197 
198 	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
199 	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
200 	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
201 	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
202 	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
203 	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
204 	if (vp->v_type == VCHR || vp->v_type == VBLK)
205 		vap->va_rdev = hp->hs_dirent.r_dev;
206 	else
207 		vap->va_rdev = 0;
208 	vap->va_blksize = vfsp->vfs_bsize;
209 	/* no. of blocks = no. of data blocks + no. of xar blocks */
210 	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
211 	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
212 	vap->va_seq = hp->hs_seq;
213 	return (0);
214 }
215 
216 /*ARGSUSED*/
217 static int
218 hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
219 {
220 	struct hsnode *hp;
221 
222 	if (vp->v_type != VLNK)
223 		return (EINVAL);
224 
225 	hp = VTOH(vp);
226 
227 	if (hp->hs_dirent.sym_link == (char *)NULL)
228 		return (ENOENT);
229 
230 	return (uiomove(hp->hs_dirent.sym_link,
231 	    (size_t)MIN(hp->hs_dirent.ext_size,
232 	    uiop->uio_resid), UIO_READ, uiop));
233 }
234 
235 /*ARGSUSED*/
236 static void
237 hsfs_inactive(struct vnode *vp, struct cred *cred)
238 {
239 	struct hsnode *hp;
240 	struct hsfs *fsp;
241 
242 	int nopage;
243 
244 	hp = VTOH(vp);
245 	fsp = VFS_TO_HSFS(vp->v_vfsp);
246 	/*
247 	 * Note: acquiring and holding v_lock for quite a while
248 	 * here serializes on the vnode; this is unfortunate, but
249 	 * likely not to overly impact performance, as the underlying
250 	 * device (CDROM drive) is quite slow.
251 	 */
252 	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
253 	mutex_enter(&hp->hs_contents_lock);
254 	mutex_enter(&vp->v_lock);
255 
256 	if (vp->v_count < 1) {
257 		panic("hsfs_inactive: v_count < 1");
258 		/*NOTREACHED*/
259 	}
260 
261 	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
262 		vp->v_count--;	/* release hold from vn_rele */
263 		mutex_exit(&vp->v_lock);
264 		mutex_exit(&hp->hs_contents_lock);
265 		rw_exit(&fsp->hsfs_hash_lock);
266 		return;
267 	}
268 	vp->v_count--;	/* release hold from vn_rele */
269 	if (vp->v_count == 0) {
270 		/*
271 		 * Free the hsnode.
272 		 * If there are no pages associated with the
273 		 * hsnode, give it back to the kmem_cache,
274 		 * else put at the end of this file system's
275 		 * internal free list.
276 		 */
277 		nopage = !vn_has_cached_data(vp);
278 		hp->hs_flags = 0;
279 		/*
280 		 * exit these locks now, since hs_freenode may
281 		 * kmem_free the hsnode and embedded vnode
282 		 */
283 		mutex_exit(&vp->v_lock);
284 		mutex_exit(&hp->hs_contents_lock);
285 		hs_freenode(vp, fsp, nopage);
286 	} else {
287 		mutex_exit(&vp->v_lock);
288 		mutex_exit(&hp->hs_contents_lock);
289 	}
290 	rw_exit(&fsp->hsfs_hash_lock);
291 }
292 
293 
294 /*ARGSUSED*/
295 static int
296 hsfs_lookup(
297 	struct vnode *dvp,
298 	char *nm,
299 	struct vnode **vpp,
300 	struct pathname *pnp,
301 	int flags,
302 	struct vnode *rdir,
303 	struct cred *cred)
304 {
305 	int error;
306 	int namelen = (int)strlen(nm);
307 
308 	if (*nm == '\0') {
309 		VN_HOLD(dvp);
310 		*vpp = dvp;
311 		return (0);
312 	}
313 
314 	/*
315 	 * If we're looking for ourself, life is simple.
316 	 */
317 	if (namelen == 1 && *nm == '.') {
318 		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
319 			return (error);
320 		VN_HOLD(dvp);
321 		*vpp = dvp;
322 		return (0);
323 	}
324 
325 	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
326 }
327 
328 
329 /*ARGSUSED*/
330 static int
331 hsfs_readdir(
332 	struct vnode	*vp,
333 	struct uio	*uiop,
334 	struct cred	*cred,
335 	int		*eofp)
336 {
337 	struct hsnode	*dhp;
338 	struct hsfs	*fsp;
339 	struct hs_direntry hd;
340 	struct dirent64	*nd;
341 	int		error;
342 	uint_t		offset;		/* real offset in directory */
343 	uint_t		dirsiz;		/* real size of directory */
344 	uchar_t		*blkp;
345 	int		hdlen;		/* length of hs directory entry */
346 	long		ndlen;		/* length of dirent entry */
347 	int		bytes_wanted;
348 	size_t		bufsize;	/* size of dirent buffer */
349 	char		*outbuf;	/* ptr to dirent buffer */
350 	char		*dname;
351 	int		dnamelen;
352 	size_t		dname_size;
353 	struct fbuf	*fbp;
354 	uint_t		last_offset;	/* last index into current dir block */
355 	ulong_t		dir_lbn;	/* lbn of directory */
356 	ino64_t		dirino;	/* temporary storage before storing in dirent */
357 	off_t		diroff;
358 
359 	dhp = VTOH(vp);
360 	fsp = VFS_TO_HSFS(vp->v_vfsp);
361 	if (dhp->hs_dirent.ext_size == 0)
362 		hs_filldirent(vp, &dhp->hs_dirent);
363 	dirsiz = dhp->hs_dirent.ext_size;
364 	dir_lbn = dhp->hs_dirent.ext_lbn;
365 	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
366 		if (eofp)
367 			*eofp = 1;
368 		return (0);
369 	}
370 	ASSERT(uiop->uio_loffset <= MAXOFF_T);
371 	offset = (uint_t)uiop->uio_offset;
372 
373 	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
374 	dname = kmem_alloc(dname_size, KM_SLEEP);
375 	bufsize = uiop->uio_resid + sizeof (struct dirent64);
376 
377 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
378 	nd = (struct dirent64 *)outbuf;
379 
380 	while (offset < dirsiz) {
381 		if ((offset & MAXBMASK) + MAXBSIZE > dirsiz)
382 			bytes_wanted = dirsiz - (offset & MAXBMASK);
383 		else
384 			bytes_wanted = MAXBSIZE;
385 
386 		error = fbread(vp, (offset_t)(offset & MAXBMASK),
387 			(unsigned int)bytes_wanted, S_READ, &fbp);
388 		if (error)
389 			goto done;
390 
391 		blkp = (uchar_t *)fbp->fb_addr;
392 		last_offset = (offset & MAXBMASK) + fbp->fb_count - 1;
393 
394 #define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */
395 
396 		while (offset < last_offset) {
397 			/*
398 			 * Directory Entries cannot span sectors.
399 			 * Unused bytes at the end of each sector are zeroed.
400 			 * Therefore, detect this condition when the size
401 			 * field of the directory entry is zero.
402 			 */
403 			hdlen = (int)((uchar_t)
404 				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
405 			if (hdlen == 0) {
406 				/* advance to next sector boundary */
407 				offset = (offset & MAXHSMASK) + HS_SECTOR_SIZE;
408 
409 				/*
410 				 * Have we reached the end of current block?
411 				 */
412 				if (offset > last_offset)
413 					break;
414 				else
415 					continue;
416 			}
417 
418 			/* make sure this is nullified before  reading it */
419 			bzero(&hd, sizeof (hd));
420 
421 			/*
422 			 * Just ignore invalid directory entries.
423 			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
424 			 */
425 			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
426 				&hd, dname, &dnamelen)) {
427 				/*
428 				 * Determine if there is enough room
429 				 */
430 				ndlen = (long)DIRENT64_RECLEN((dnamelen));
431 
432 				if ((ndlen + ((char *)nd - outbuf)) >
433 				    uiop->uio_resid) {
434 					fbrelse(fbp, S_READ);
435 					goto done; /* output buffer full */
436 				}
437 
438 				diroff = offset + hdlen;
439 				/*
440 				 * Generate nodeid.
441 				 * If a directory, nodeid points to the
442 				 * canonical dirent describing the directory:
443 				 * the dirent of the "." entry for the
444 				 * directory, which is pointed to by all
445 				 * dirents for that directory.
446 				 * Otherwise, nodeid points to dirent of file.
447 				 */
448 				if (hd.type == VDIR) {
449 					dirino = (ino64_t)
450 					    MAKE_NODEID(hd.ext_lbn, 0,
451 					    vp->v_vfsp);
452 				} else {
453 					struct hs_volume *hvp;
454 					offset_t lbn, off;
455 
456 					/*
457 					 * Normalize lbn and off
458 					 */
459 					hvp = &fsp->hsfs_vol;
460 					lbn = dir_lbn +
461 					    (offset >> hvp->lbn_shift);
462 					off = offset & hvp->lbn_maxoffset;
463 					dirino = (ino64_t)MAKE_NODEID(lbn,
464 					    off, vp->v_vfsp);
465 				}
466 
467 
468 				/* strncpy(9f) will zero uninitialized bytes */
469 
470 				ASSERT(strlen(dname) + 1 <=
471 				    DIRENT64_NAMELEN(ndlen));
472 				(void) strncpy(nd->d_name, dname,
473 				    DIRENT64_NAMELEN(ndlen));
474 				nd->d_reclen = (ushort_t)ndlen;
475 				nd->d_off = (offset_t)diroff;
476 				nd->d_ino = dirino;
477 				nd = (struct dirent64 *)((char *)nd + ndlen);
478 
479 				/*
480 				 * free up space allocated for symlink
481 				 */
482 				if (hd.sym_link != (char *)NULL) {
483 					kmem_free(hd.sym_link,
484 					    (size_t)(hd.ext_size+1));
485 					hd.sym_link = (char *)NULL;
486 				}
487 			}
488 
489 			offset += hdlen;
490 		}
491 		fbrelse(fbp, S_READ);
492 	}
493 
494 	/*
495 	 * Got here for one of the following reasons:
496 	 *	1) outbuf is full (error == 0)
497 	 *	2) end of directory reached (error == 0)
498 	 *	3) error reading directory sector (error != 0)
499 	 *	4) directory entry crosses sector boundary (error == 0)
500 	 *
501 	 * If any directory entries have been copied, don't report
502 	 * case 4.  Instead, return the valid directory entries.
503 	 *
504 	 * If no entries have been copied, report the error.
505 	 * If case 4, this will be indistiguishable from EOF.
506 	 */
507 done:
508 	ndlen = ((char *)nd - outbuf);
509 	if (ndlen != 0) {
510 		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
511 		uiop->uio_offset = offset;
512 	}
513 	kmem_free(dname, dname_size);
514 	kmem_free(outbuf, bufsize);
515 	if (eofp && error == 0)
516 		*eofp = (uiop->uio_offset >= dirsiz);
517 	return (error);
518 }
519 
520 static int
521 hsfs_fid(struct vnode *vp, struct fid *fidp)
522 {
523 	struct hsnode *hp;
524 	struct hsfid *fid;
525 
526 	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
527 		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
528 		return (ENOSPC);
529 	}
530 
531 	fid = (struct hsfid *)fidp;
532 	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
533 	hp = VTOH(vp);
534 	mutex_enter(&hp->hs_contents_lock);
535 	fid->hf_dir_lbn = hp->hs_dir_lbn;
536 	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
537 	mutex_exit(&hp->hs_contents_lock);
538 	return (0);
539 }
540 
541 /*ARGSUSED*/
542 static int
543 hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
544 {
545 	return (0);
546 }
547 
548 /*ARGSUSED*/
549 static int
550 hsfs_close(
551 	struct vnode *vp,
552 	int flag,
553 	int count,
554 	offset_t offset,
555 	struct cred *cred)
556 {
557 	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
558 	cleanshares(vp, ttoproc(curthread)->p_pid);
559 	return (0);
560 }
561 
562 /*ARGSUSED2*/
563 static int
564 hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
565 {
566 	return (hs_access(vp, (mode_t)mode, cred));
567 }
568 
569 /*
570  * the seek time of a CD-ROM is very slow, and data transfer
571  * rate is even worse (max. 150K per sec).  The design
572  * decision is to reduce access to cd-rom as much as possible,
573  * and to transfer a sizable block (read-ahead) of data at a time.
574  * UFS style of read ahead one block at a time is not appropriate,
575  * and is not supported
576  */
577 
578 /*
579  * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
580  */
581 #define	KLUSTSIZE	(56 * 1024)
582 /* we don't support read ahead */
583 int hsfs_lostpage;	/* no. of times we lost original page */
584 
585 /*
586  * Used to prevent biodone() from releasing buf resources that
587  * we didn't allocate in quite the usual way.
588  */
589 /*ARGSUSED*/
590 int
591 hsfs_iodone(struct buf *bp)
592 {
593 	sema_v(&bp->b_io);
594 	return (0);
595 }
596 
597 /*
598  * Each file may have a different interleaving on disk.  This makes
599  * things somewhat interesting.  The gist is that there are some
600  * number of contiguous data sectors, followed by some other number
601  * of contiguous skip sectors.  The sum of those two sets of sectors
602  * defines the interleave size.  Unfortunately, it means that we generally
603  * can't simply read N sectors starting at a given offset to satisfy
604  * any given request.
605  *
606  * What we do is get the relevant memory pages via pvn_read_kluster(),
607  * then stride through the interleaves, setting up a buf for each
608  * sector that needs to be brought in.  Instead of kmem_alloc'ing
609  * space for the sectors, though, we just point at the appropriate
610  * spot in the relevant page for each of them.  This saves us a bunch
611  * of copying.
612  */
613 /*ARGSUSED*/
614 static int
615 hsfs_getapage(
616 	struct vnode *vp,
617 	u_offset_t off,
618 	size_t len,
619 	uint_t *protp,
620 	struct page *pl[],
621 	size_t plsz,
622 	struct seg *seg,
623 	caddr_t addr,
624 	enum seg_rw rw,
625 	struct cred *cred)
626 {
627 	struct hsnode *hp;
628 	struct hsfs *fsp;
629 	int	err;
630 	struct buf *bufs;
631 	caddr_t *vas;
632 	caddr_t va;
633 	struct page *pp, *searchp, *lastp;
634 	page_t	*pagefound;
635 	offset_t	bof;
636 	struct vnode *devvp;
637 	ulong_t	byte_offset;
638 	size_t	io_len_tmp;
639 	uint_t	io_off, io_len;
640 	uint_t	xlen;
641 	uint_t	filsiz;
642 	uint_t	secsize;
643 	uint_t	bufcnt;
644 	uint_t	bufsused;
645 	uint_t	count;
646 	uint_t	io_end;
647 	uint_t	which_chunk_lbn;
648 	uint_t	offset_lbn;
649 	uint_t	offset_extra;
650 	offset_t	offset_bytes;
651 	uint_t	remaining_bytes;
652 	uint_t	extension;
653 	int	remainder;	/* must be signed */
654 	int	chunk_lbn_count;
655 	int	chunk_data_bytes;
656 	int	xarsiz;
657 	diskaddr_t driver_block;
658 	u_offset_t io_off_tmp;
659 
660 	/*
661 	 * We don't support asynchronous operation at the moment, so
662 	 * just pretend we did it.  If the pages are ever actually
663 	 * needed, they'll get brought in then.
664 	 */
665 	if (pl == NULL)
666 		return (0);
667 
668 	hp = VTOH(vp);
669 	fsp = VFS_TO_HSFS(vp->v_vfsp);
670 	devvp = fsp->hsfs_devvp;
671 	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */
672 
673 	/* file data size */
674 	filsiz = hp->hs_dirent.ext_size;
675 
676 	/* disk addr for start of file */
677 	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
678 
679 	/* xarsiz byte must be skipped for data */
680 	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
681 
682 	/* how many logical blocks in an interleave (data+skip) */
683 	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
684 
685 	if (chunk_lbn_count == 0) {
686 		chunk_lbn_count = 1;
687 	}
688 
689 	/*
690 	 * Convert interleaving size into bytes.  The zero case
691 	 * (no interleaving) optimization is handled as a side-
692 	 * effect of the read-ahead logic.
693 	 */
694 	if (hp->hs_dirent.intlf_sz == 0) {
695 		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
696 	} else {
697 		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
698 			vp->v_vfsp);
699 	}
700 
701 reread:
702 	err = 0;
703 	pagefound = 0;
704 
705 	/*
706 	 * Do some read-ahead.  This mostly saves us a bit of
707 	 * system cpu time more than anything else when doing
708 	 * sequential reads.  At some point, could do the
709 	 * read-ahead asynchronously which might gain us something
710 	 * on wall time, but it seems unlikely....
711 	 *
712 	 * We do the easy case here, which is to read through
713 	 * the end of the chunk, minus whatever's at the end that
714 	 * won't exactly fill a page.
715 	 */
716 	which_chunk_lbn = (off + len) / chunk_data_bytes;
717 	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
718 	extension -= (extension % PAGESIZE);
719 	if (extension != 0 && extension < filsiz - off) {
720 		len = extension;
721 	}
722 	/*
723 	 * Some cd writers don't write sectors that aren't used.  Also,
724 	 * there's no point in reading sectors we'll never look at.  So,
725 	 * if we're asked to go beyond the end of a file, truncate to the
726 	 * length of that file.
727 	 *
728 	 * Additionally, this behaviour is required by section 6.4.5 of
729 	 * ISO 9660:1988(E).
730 	 */
731 	if (len > (filsiz - off)) {
732 		len = filsiz - off;
733 	}
734 
735 	/*
736 	 * After all that, make sure we're asking for things in units
737 	 * that bdev_strategy() will understand (see bug 4202551).
738 	 */
739 	len = roundup(len, DEV_BSIZE);
740 
741 	pp = NULL;
742 again:
743 	/* search for page in buffer */
744 	if ((pagefound = page_exists(vp, off)) == 0) {
745 		/*
746 		 * Need to really do disk IO to get the page.
747 		 */
748 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
749 		    &io_len_tmp, off, len, 0);
750 
751 		if (pp == NULL)
752 			goto again;
753 
754 		io_off = (uint_t)io_off_tmp;
755 		io_len = (uint_t)io_len_tmp;
756 
757 		/* check for truncation */
758 		/*
759 		 * xxx Clean up and return EIO instead?
760 		 * xxx Ought to go to u_offset_t for everything, but we
761 		 * xxx call lots of things that want uint_t arguments.
762 		 */
763 		ASSERT(io_off == io_off_tmp);
764 
765 		/*
766 		 * get enough buffers for worst-case scenario
767 		 * (i.e., no coalescing possible).
768 		 */
769 		bufcnt = (len + secsize - 1) / secsize;
770 		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
771 		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
772 		for (count = 0; count < bufcnt; count++) {
773 			bufs[count].b_edev = devvp->v_rdev;
774 			bufs[count].b_dev = cmpdev(devvp->v_rdev);
775 			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
776 			bufs[count].b_iodone = hsfs_iodone;
777 			bufs[count].b_vp = vp;
778 			bufs[count].b_file = vp;
779 			sema_init(&bufs[count].b_io, 0, NULL,
780 			    SEMA_DEFAULT, NULL);
781 			sema_init(&bufs[count].b_sem, 0, NULL,
782 			    SEMA_DEFAULT, NULL);
783 		}
784 
785 		/* zero not-to-be-read page parts */
786 		xlen = io_len & PAGEOFFSET;
787 		if (xlen != 0)
788 			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
789 
790 		va = NULL;
791 		lastp = NULL;
792 		searchp = pp;
793 		io_end = io_off + io_len;
794 		for (count = 0, byte_offset = io_off;
795 			byte_offset < io_end;
796 			count++) {
797 			ASSERT(count < bufcnt);
798 
799 			/* Compute disk address for interleaving. */
800 
801 			/* considered without skips */
802 			which_chunk_lbn = byte_offset / chunk_data_bytes;
803 
804 			/* factor in skips */
805 			offset_lbn = which_chunk_lbn * chunk_lbn_count;
806 
807 			/* convert to physical byte offset for lbn */
808 			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
809 
810 			/* don't forget offset into lbn */
811 			offset_extra = byte_offset % chunk_data_bytes;
812 
813 			/* get virtual block number for driver */
814 			driver_block = lbtodb(bof + xarsiz
815 				+ offset_bytes + offset_extra);
816 
817 			if (lastp != searchp) {
818 				/* this branch taken first time through loop */
819 				va = vas[count]
820 					= ppmapin(searchp, PROT_WRITE,
821 						(caddr_t)-1);
822 				/* ppmapin() guarantees not to return NULL */
823 			} else {
824 				vas[count] = NULL;
825 			}
826 
827 			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
828 			bufs[count].b_offset =
829 			    (offset_t)(byte_offset - io_off + off);
830 
831 			/*
832 			 * We specifically use the b_lblkno member here
833 			 * as even in the 32 bit world driver_block can
834 			 * get very large in line with the ISO9660 spec.
835 			 */
836 
837 			bufs[count].b_lblkno = driver_block;
838 
839 			remaining_bytes = ((which_chunk_lbn + 1)
840 				* chunk_data_bytes)
841 				- byte_offset;
842 
843 			/*
844 			 * remaining_bytes can't be zero, as we derived
845 			 * which_chunk_lbn directly from byte_offset.
846 			 */
847 			if ((remaining_bytes+byte_offset) < (off+len)) {
848 				/* coalesce-read the rest of the chunk */
849 				bufs[count].b_bcount = remaining_bytes;
850 			} else {
851 				/* get the final bits */
852 				bufs[count].b_bcount = off + len - byte_offset;
853 			}
854 
855 			/*
856 			 * It would be nice to do multiple pages'
857 			 * worth at once here when the opportunity
858 			 * arises, as that has been shown to improve
859 			 * our wall time.  However, to do that
860 			 * requires that we use the pageio subsystem,
861 			 * which doesn't mix well with what we're
862 			 * already using here.  We can't use pageio
863 			 * all the time, because that subsystem
864 			 * assumes that a page is stored in N
865 			 * contiguous blocks on the device.
866 			 * Interleaving violates that assumption.
867 			 */
868 
869 			remainder = PAGESIZE - (byte_offset % PAGESIZE);
870 			if (bufs[count].b_bcount > remainder) {
871 				bufs[count].b_bcount = remainder;
872 			}
873 
874 			bufs[count].b_bufsize = bufs[count].b_bcount;
875 			byte_offset += bufs[count].b_bcount;
876 
877 			(void) bdev_strategy(&bufs[count]);
878 
879 			lwp_stat_update(LWP_STAT_INBLK, 1);
880 			lastp = searchp;
881 			if ((remainder - bufs[count].b_bcount) < 1) {
882 				searchp = searchp->p_next;
883 			}
884 		}
885 
886 		bufsused = count;
887 		/* Now wait for everything to come in */
888 		for (count = 0; count < bufsused; count++) {
889 			if (err == 0) {
890 				err = biowait(&bufs[count]);
891 			} else
892 				(void) biowait(&bufs[count]);
893 		}
894 
895 		/* Don't leak resources */
896 		for (count = 0; count < bufcnt; count++) {
897 			sema_destroy(&bufs[count].b_io);
898 			sema_destroy(&bufs[count].b_sem);
899 			if (count < bufsused && vas[count] != NULL) {
900 				ppmapout(vas[count]);
901 			}
902 		}
903 
904 		kmem_free(vas, bufcnt * sizeof (caddr_t));
905 		kmem_free(bufs, bufcnt * sizeof (struct buf));
906 	}
907 
908 	if (err) {
909 		pvn_read_done(pp, B_ERROR);
910 		return (err);
911 	}
912 
913 	/*
914 	 * Lock the requested page, and the one after it if possible.
915 	 * Don't bother if our caller hasn't given us a place to stash
916 	 * the page pointers, since otherwise we'd lock pages that would
917 	 * never get unlocked.
918 	 */
919 	if (pagefound) {
920 		int index;
921 		ulong_t soff;
922 
923 		/*
924 		 * Make sure it's in memory before we say it's here.
925 		 */
926 		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
927 			hsfs_lostpage++;
928 			goto reread;
929 		}
930 
931 		pl[0] = pp;
932 		index = 1;
933 
934 		/*
935 		 * Try to lock the next page, if it exists, without
936 		 * blocking.
937 		 */
938 		plsz -= PAGESIZE;
939 		/* LINTED (plsz is unsigned) */
940 		for (soff = off + PAGESIZE; plsz > 0;
941 		    soff += PAGESIZE, plsz -= PAGESIZE) {
942 			pp = page_lookup_nowait(vp, (u_offset_t)soff,
943 					SE_SHARED);
944 			if (pp == NULL)
945 				break;
946 			pl[index++] = pp;
947 		}
948 		pl[index] = NULL;
949 		return (0);
950 	}
951 
952 	if (pp != NULL) {
953 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
954 	}
955 
956 	return (err);
957 }
958 
959 static int
960 hsfs_getpage(
961 	struct vnode *vp,
962 	offset_t off,
963 	size_t len,
964 	uint_t *protp,
965 	struct page *pl[],
966 	size_t plsz,
967 	struct seg *seg,
968 	caddr_t addr,
969 	enum seg_rw rw,
970 	struct cred *cred)
971 {
972 	int err;
973 	uint_t filsiz;
974 	struct hsnode *hp = VTOH(vp);
975 
976 	/* does not support write */
977 	if (rw == S_WRITE) {
978 		panic("write attempt on READ ONLY HSFS");
979 		/*NOTREACHED*/
980 	}
981 
982 	if (vp->v_flag & VNOMAP) {
983 		return (ENOSYS);
984 	}
985 
986 	ASSERT(off <= MAXOFF_T);
987 
988 	/*
989 	 * Determine file data size for EOF check.
990 	 */
991 	filsiz = hp->hs_dirent.ext_size;
992 	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
993 		return (EFAULT);	/* beyond EOF */
994 
995 	if (protp != NULL)
996 		*protp = PROT_ALL;
997 
998 	if (len <= PAGESIZE)
999 		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
1000 		    seg, addr, rw, cred);
1001 	else
1002 		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
1003 		    pl, plsz, seg, addr, rw, cred);
1004 
1005 	return (err);
1006 }
1007 
1008 
1009 
1010 /*
1011  * This function should never be called. We need to have it to pass
1012  * it as an argument to other functions.
1013  */
1014 /*ARGSUSED*/
1015 int
1016 hsfs_putapage(
1017 	vnode_t		*vp,
1018 	page_t		*pp,
1019 	u_offset_t	*offp,
1020 	size_t		*lenp,
1021 	int		flags,
1022 	cred_t		*cr)
1023 {
1024 	/* should never happen - just destroy it */
1025 	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1026 	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1027 	return (0);
1028 }
1029 
1030 
1031 /*
1032  * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1033  * B_INVAL is set by:
1034  *
1035  *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1036  *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1037  *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
1038  *
1039  * The B_FREE (as well as the B_DONTNEED) flag is set when the
1040  * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1041  * from SEGVN to release pages behind a pagefault.
1042  */
1043 /*ARGSUSED*/
1044 static int
1045 hsfs_putpage(
1046 	struct vnode	*vp,
1047 	offset_t	off,
1048 	size_t		len,
1049 	int		flags,
1050 	struct cred	*cr)
1051 {
1052 	int error = 0;
1053 
1054 	if (vp->v_count == 0) {
1055 		panic("hsfs_putpage: bad v_count");
1056 		/*NOTREACHED*/
1057 	}
1058 
1059 	if (vp->v_flag & VNOMAP)
1060 		return (ENOSYS);
1061 
1062 	ASSERT(off <= MAXOFF_T);
1063 
1064 	if (!vn_has_cached_data(vp))	/* no pages mapped */
1065 		return (0);
1066 
1067 	if (len == 0)		/* from 'off' to EOF */
1068 		error = pvn_vplist_dirty(vp, off,
1069 					hsfs_putapage, flags, cr);
1070 	else {
1071 		offset_t end_off = off + len;
1072 		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1073 		offset_t io_off;
1074 
1075 		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1076 		if (end_off > file_size)
1077 			end_off = file_size;
1078 
1079 		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1080 			page_t *pp;
1081 
1082 			/*
1083 			 * We insist on getting the page only if we are
1084 			 * about to invalidate, free or write it and
1085 			 * the B_ASYNC flag is not set.
1086 			 */
1087 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1088 				pp = page_lookup(vp, io_off,
1089 					(flags & (B_INVAL | B_FREE)) ?
1090 					    SE_EXCL : SE_SHARED);
1091 			} else {
1092 				pp = page_lookup_nowait(vp, io_off,
1093 					(flags & B_FREE) ? SE_EXCL : SE_SHARED);
1094 			}
1095 
1096 			if (pp == NULL)
1097 				continue;
1098 			/*
1099 			 * Normally pvn_getdirty() should return 0, which
1100 			 * impies that it has done the job for us.
1101 			 * The shouldn't-happen scenario is when it returns 1.
1102 			 * This means that the page has been modified and
1103 			 * needs to be put back.
1104 			 * Since we can't write on a CD, we fake a failed
1105 			 * I/O and force pvn_write_done() to destroy the page.
1106 			 */
1107 			if (pvn_getdirty(pp, flags) == 1) {
1108 				cmn_err(CE_NOTE,
1109 					"hsfs_putpage: dirty HSFS page");
1110 				pvn_write_done(pp, flags |
1111 				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1112 			}
1113 		}
1114 	}
1115 	return (error);
1116 }
1117 
1118 
1119 /*ARGSUSED*/
1120 static int
1121 hsfs_map(
1122 	struct vnode *vp,
1123 	offset_t off,
1124 	struct as *as,
1125 	caddr_t *addrp,
1126 	size_t len,
1127 	uchar_t prot,
1128 	uchar_t maxprot,
1129 	uint_t flags,
1130 	struct cred *cred)
1131 {
1132 	struct segvn_crargs vn_a;
1133 	int error;
1134 
1135 	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1136 
1137 	if (vp->v_flag & VNOMAP)
1138 		return (ENOSYS);
1139 
1140 	if (off > MAXOFF_T)
1141 		return (EFBIG);
1142 
1143 	if (off < 0 || (offset_t)(off + len) < 0)
1144 		return (EINVAL);
1145 
1146 	if (vp->v_type != VREG) {
1147 		return (ENODEV);
1148 	}
1149 
1150 	/*
1151 	 * If file is being locked, disallow mapping.
1152 	 */
1153 	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1154 		return (EAGAIN);
1155 
1156 	as_rangelock(as);
1157 
1158 	if ((flags & MAP_FIXED) == 0) {
1159 		map_addr(addrp, len, off, 1, flags);
1160 		if (*addrp == NULL) {
1161 			as_rangeunlock(as);
1162 			return (ENOMEM);
1163 		}
1164 	} else {
1165 		/*
1166 		 * User specified address - blow away any previous mappings
1167 		 */
1168 		(void) as_unmap(as, *addrp, len);
1169 	}
1170 
1171 	vn_a.vp = vp;
1172 	vn_a.offset = off;
1173 	vn_a.type = flags & MAP_TYPE;
1174 	vn_a.prot = prot;
1175 	vn_a.maxprot = maxprot;
1176 	vn_a.flags = flags & ~MAP_TYPE;
1177 	vn_a.cred = cred;
1178 	vn_a.amp = NULL;
1179 	vn_a.szc = 0;
1180 	vn_a.lgrp_mem_policy_flags = 0;
1181 
1182 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
1183 	as_rangeunlock(as);
1184 	return (error);
1185 }
1186 
1187 /* ARGSUSED */
1188 static int
1189 hsfs_addmap(
1190 	struct vnode *vp,
1191 	offset_t off,
1192 	struct as *as,
1193 	caddr_t addr,
1194 	size_t len,
1195 	uchar_t prot,
1196 	uchar_t maxprot,
1197 	uint_t flags,
1198 	struct cred *cr)
1199 {
1200 	struct hsnode *hp;
1201 
1202 	if (vp->v_flag & VNOMAP)
1203 		return (ENOSYS);
1204 
1205 	hp = VTOH(vp);
1206 	mutex_enter(&hp->hs_contents_lock);
1207 	hp->hs_mapcnt += btopr(len);
1208 	mutex_exit(&hp->hs_contents_lock);
1209 	return (0);
1210 }
1211 
1212 /*ARGSUSED*/
1213 static int
1214 hsfs_delmap(
1215 	struct vnode *vp,
1216 	offset_t off,
1217 	struct as *as,
1218 	caddr_t addr,
1219 	size_t len,
1220 	uint_t prot,
1221 	uint_t maxprot,
1222 	uint_t flags,
1223 	struct cred *cr)
1224 {
1225 	struct hsnode *hp;
1226 
1227 	if (vp->v_flag & VNOMAP)
1228 		return (ENOSYS);
1229 
1230 	hp = VTOH(vp);
1231 	mutex_enter(&hp->hs_contents_lock);
1232 	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
1233 	ASSERT(hp->hs_mapcnt >= 0);
1234 	mutex_exit(&hp->hs_contents_lock);
1235 	return (0);
1236 }
1237 
1238 /* ARGSUSED */
1239 static int
1240 hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
1241 {
1242 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1243 }
1244 
1245 /* ARGSUSED */
1246 static int
1247 hsfs_frlock(
1248 	struct vnode *vp,
1249 	int cmd,
1250 	struct flock64 *bfp,
1251 	int flag,
1252 	offset_t offset,
1253 	struct flk_callback *flk_cbp,
1254 	cred_t *cr)
1255 {
1256 	struct hsnode *hp = VTOH(vp);
1257 
1258 	/*
1259 	 * If the file is being mapped, disallow fs_frlock.
1260 	 * We are not holding the hs_contents_lock while checking
1261 	 * hs_mapcnt because the current locking strategy drops all
1262 	 * locks before calling fs_frlock.
1263 	 * So, hs_mapcnt could change before we enter fs_frlock making
1264 	 * it meaningless to have held hs_contents_lock in the first place.
1265 	 */
1266 	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1267 		return (EAGAIN);
1268 
1269 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
1270 }
1271 
1272 const fs_operation_def_t hsfs_vnodeops_template[] = {
1273 	VOPNAME_OPEN, hsfs_open,
1274 	VOPNAME_CLOSE, hsfs_close,
1275 	VOPNAME_READ, hsfs_read,
1276 	VOPNAME_GETATTR, hsfs_getattr,
1277 	VOPNAME_ACCESS, hsfs_access,
1278 	VOPNAME_LOOKUP, hsfs_lookup,
1279 	VOPNAME_READDIR, hsfs_readdir,
1280 	VOPNAME_READLINK, hsfs_readlink,
1281 	VOPNAME_FSYNC, hsfs_fsync,
1282 	VOPNAME_INACTIVE, (fs_generic_func_p) hsfs_inactive,
1283 	VOPNAME_FID, hsfs_fid,
1284 	VOPNAME_SEEK, hsfs_seek,
1285 	VOPNAME_FRLOCK, hsfs_frlock,
1286 	VOPNAME_GETPAGE, hsfs_getpage,
1287 	VOPNAME_PUTPAGE, hsfs_putpage,
1288 	VOPNAME_MAP, (fs_generic_func_p) hsfs_map,
1289 	VOPNAME_ADDMAP, (fs_generic_func_p) hsfs_addmap,
1290 	VOPNAME_DELMAP, hsfs_delmap,
1291 	NULL, NULL
1292 };
1293 
1294 struct vnodeops *hsfs_vnodeops;
1295