1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2017, Joyent, Inc.
25 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
26 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
27 */
28
29 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
30 /* All Rights Reserved */
31
32 /*
33 * University Copyright- Copyright (c) 1982, 1986, 1988
34 * The Regents of the University of California
35 * All Rights Reserved
36 *
37 * University Acknowledgment- Portions of this document are derived from
38 * software developed by the University of California, Berkeley, and its
39 * contributors.
40 */
41
42 /*
43 * This file contains those functions from fs/vnode.c that can be
44 * used with relatively little change. Functions that differ
45 * significantly from that are in other files.
46 */
47
48 #include <sys/types.h>
49 #include <sys/param.h>
50 #include <sys/t_lock.h>
51 #include <sys/errno.h>
52 #include <sys/cred.h>
53 #include <sys/user.h>
54 #include <sys/uio.h>
55 #include <sys/file.h>
56 #include <sys/pathname.h>
57 #include <sys/vfs.h>
58 #include <sys/vfs_opreg.h>
59 #include <sys/vnode.h>
60 #include <sys/rwstlock.h>
61 #include <sys/fem.h>
62 #include <sys/stat.h>
63 #include <sys/mode.h>
64 #include <sys/conf.h>
65 #include <sys/sysmacros.h>
66 #include <sys/cmn_err.h>
67 #include <sys/systm.h>
68 #include <sys/kmem.h>
69 #include <sys/atomic.h>
70 #include <sys/debug.h>
71 #include <sys/acl.h>
72 #include <sys/nbmlock.h>
73 #include <sys/fcntl.h>
74 #include <sys/time.h>
75 #include <fs/fs_subr.h>
76 #include <fs/fs_reparse.h>
77
78 #include <libfksmbfs.h>
79
/* Determine if this vnode is a file that is read-only */
#define	ISROFILE(vp)	\
	((vp)->v_type != VCHR && (vp)->v_type != VBLK && \
	(vp)->v_type != VFIFO && vn_is_readonly(vp))

/*
 * Stubbed-out statistics and xid-mapping macros: this user-space library
 * keeps no vopstats, so these only consume their arguments, letting the
 * fop_*() bodies below stay textually close to the kernel originals.
 */
#define	VOPSTATS_UPDATE(vp, counter) ((void)vp)
#define	VOPSTATS_UPDATE_IO(vp, counter, bytecounter, bytesval) \
	((void)vp, (void)bytesval)
#define	VOPXID_MAP_CR(vp, cr) ((void)vp)
89
90 /*
91 * Excerpts from fs/vnode.c
92 */
93
/* Global used for empty/invalid v_path */
char *vn_vpath_empty = "";

/* Forward decl; presumably defined later in this file (not in this chunk). */
static int fs_reparse_mark(char *target, vattr_t *vap, xvattr_t *xvattr);

/*
 * Convert stat(2) formats to vnode types and vice versa.  (Knows about
 * numerical order of S_IFMT and vnode types.)
 */
/* S_IFMT bits (>> 12) -> vtype */
enum vtype iftovt_tab[] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};

/* vtype -> S_IFMT bits; indexed by enum vtype */
ushort_t vttoif_tab[] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO,
	S_IFDOOR, 0, S_IFSOCK, S_IFPORT, 0
};

/*
 * The system vnode cache.
 */

kmem_cache_t *vn_cache;
118
119
120 /*
121 * Vnode operations vector.
122 */
123
124 static const fs_operation_trans_def_t vn_ops_table[] = {
125 VOPNAME_OPEN, offsetof(struct vnodeops, vop_open),
126 fs_nosys, fs_nosys,
127
128 VOPNAME_CLOSE, offsetof(struct vnodeops, vop_close),
129 fs_nosys, fs_nosys,
130
131 VOPNAME_READ, offsetof(struct vnodeops, vop_read),
132 fs_nosys, fs_nosys,
133
134 VOPNAME_WRITE, offsetof(struct vnodeops, vop_write),
135 fs_nosys, fs_nosys,
136
137 VOPNAME_IOCTL, offsetof(struct vnodeops, vop_ioctl),
138 fs_nosys, fs_nosys,
139
140 VOPNAME_SETFL, offsetof(struct vnodeops, vop_setfl),
141 fs_setfl, fs_nosys,
142
143 VOPNAME_GETATTR, offsetof(struct vnodeops, vop_getattr),
144 fs_nosys, fs_nosys,
145
146 VOPNAME_SETATTR, offsetof(struct vnodeops, vop_setattr),
147 fs_nosys, fs_nosys,
148
149 VOPNAME_ACCESS, offsetof(struct vnodeops, vop_access),
150 fs_nosys, fs_nosys,
151
152 VOPNAME_LOOKUP, offsetof(struct vnodeops, vop_lookup),
153 fs_nosys, fs_nosys,
154
155 VOPNAME_CREATE, offsetof(struct vnodeops, vop_create),
156 fs_nosys, fs_nosys,
157
158 VOPNAME_REMOVE, offsetof(struct vnodeops, vop_remove),
159 fs_nosys, fs_nosys,
160
161 VOPNAME_LINK, offsetof(struct vnodeops, vop_link),
162 fs_nosys, fs_nosys,
163
164 VOPNAME_RENAME, offsetof(struct vnodeops, vop_rename),
165 fs_nosys, fs_nosys,
166
167 VOPNAME_MKDIR, offsetof(struct vnodeops, vop_mkdir),
168 fs_nosys, fs_nosys,
169
170 VOPNAME_RMDIR, offsetof(struct vnodeops, vop_rmdir),
171 fs_nosys, fs_nosys,
172
173 VOPNAME_READDIR, offsetof(struct vnodeops, vop_readdir),
174 fs_nosys, fs_nosys,
175
176 VOPNAME_SYMLINK, offsetof(struct vnodeops, vop_symlink),
177 fs_nosys, fs_nosys,
178
179 VOPNAME_READLINK, offsetof(struct vnodeops, vop_readlink),
180 fs_nosys, fs_nosys,
181
182 VOPNAME_FSYNC, offsetof(struct vnodeops, vop_fsync),
183 fs_nosys, fs_nosys,
184
185 VOPNAME_INACTIVE, offsetof(struct vnodeops, vop_inactive),
186 fs_nosys, fs_nosys,
187
188 VOPNAME_FID, offsetof(struct vnodeops, vop_fid),
189 fs_nosys, fs_nosys,
190
191 VOPNAME_RWLOCK, offsetof(struct vnodeops, vop_rwlock),
192 fs_rwlock, fs_rwlock,
193
194 VOPNAME_RWUNLOCK, offsetof(struct vnodeops, vop_rwunlock),
195 (fs_generic_func_p)(uintptr_t)fs_rwunlock,
196 (fs_generic_func_p)(intptr_t)fs_rwunlock, /* no errors allowed */
197
198 VOPNAME_SEEK, offsetof(struct vnodeops, vop_seek),
199 fs_nosys, fs_nosys,
200
201 VOPNAME_CMP, offsetof(struct vnodeops, vop_cmp),
202 fs_cmp, fs_cmp, /* no errors allowed */
203
204 VOPNAME_FRLOCK, offsetof(struct vnodeops, vop_frlock),
205 fs_frlock, fs_nosys,
206
207 VOPNAME_SPACE, offsetof(struct vnodeops, vop_space),
208 fs_nosys, fs_nosys,
209
210 VOPNAME_REALVP, offsetof(struct vnodeops, vop_realvp),
211 fs_nosys, fs_nosys,
212
213 VOPNAME_GETPAGE, offsetof(struct vnodeops, vop_getpage),
214 fs_nosys, fs_nosys,
215
216 VOPNAME_PUTPAGE, offsetof(struct vnodeops, vop_putpage),
217 fs_nosys, fs_nosys,
218
219 VOPNAME_MAP, offsetof(struct vnodeops, vop_map),
220 (fs_generic_func_p) fs_nosys_map,
221 (fs_generic_func_p) fs_nosys_map,
222
223 VOPNAME_ADDMAP, offsetof(struct vnodeops, vop_addmap),
224 (fs_generic_func_p) fs_nosys_addmap,
225 (fs_generic_func_p) fs_nosys_addmap,
226
227 VOPNAME_DELMAP, offsetof(struct vnodeops, vop_delmap),
228 fs_nosys, fs_nosys,
229
230 VOPNAME_POLL, offsetof(struct vnodeops, vop_poll),
231 (fs_generic_func_p) fs_poll, (fs_generic_func_p) fs_nosys_poll,
232
233 VOPNAME_DUMP, offsetof(struct vnodeops, vop_dump),
234 fs_nosys, fs_nosys,
235
236 VOPNAME_PATHCONF, offsetof(struct vnodeops, vop_pathconf),
237 fs_pathconf, fs_nosys,
238
239 VOPNAME_PAGEIO, offsetof(struct vnodeops, vop_pageio),
240 fs_nosys, fs_nosys,
241
242 VOPNAME_DUMPCTL, offsetof(struct vnodeops, vop_dumpctl),
243 fs_nosys, fs_nosys,
244
245 VOPNAME_DISPOSE, offsetof(struct vnodeops, vop_dispose),
246 (fs_generic_func_p)(intptr_t)fs_dispose,
247 (fs_generic_func_p)(intptr_t)fs_nodispose,
248
249 VOPNAME_SETSECATTR, offsetof(struct vnodeops, vop_setsecattr),
250 fs_nosys, fs_nosys,
251
252 VOPNAME_GETSECATTR, offsetof(struct vnodeops, vop_getsecattr),
253 fs_fab_acl, fs_nosys,
254
255 VOPNAME_SHRLOCK, offsetof(struct vnodeops, vop_shrlock),
256 fs_shrlock, fs_nosys,
257
258 VOPNAME_VNEVENT, offsetof(struct vnodeops, vop_vnevent),
259 (fs_generic_func_p) fs_vnevent_nosupport,
260 (fs_generic_func_p) fs_vnevent_nosupport,
261
262 VOPNAME_REQZCBUF, offsetof(struct vnodeops, vop_reqzcbuf),
263 fs_nosys, fs_nosys,
264
265 VOPNAME_RETZCBUF, offsetof(struct vnodeops, vop_retzcbuf),
266 fs_nosys, fs_nosys,
267
268 NULL, 0, NULL, NULL
269 };
270
271 /* Extensible attribute (xva) routines. */
272
273 /*
274 * Zero out the structure, set the size of the requested/returned bitmaps,
275 * set AT_XVATTR in the embedded vattr_t's va_mask, and set up the pointer
276 * to the returned attributes array.
277 */
278 void
xva_init(xvattr_t * xvap)279 xva_init(xvattr_t *xvap)
280 {
281 bzero(xvap, sizeof (xvattr_t));
282 xvap->xva_mapsize = XVA_MAPSIZE;
283 xvap->xva_magic = XVA_MAGIC;
284 xvap->xva_vattr.va_mask = AT_XVATTR;
285 xvap->xva_rtnattrmapp = &(xvap->xva_rtnattrmap)[0];
286 }
287
288 /*
289 * If AT_XVATTR is set, returns a pointer to the embedded xoptattr_t
290 * structure. Otherwise, returns NULL.
291 */
292 xoptattr_t *
xva_getxoptattr(xvattr_t * xvap)293 xva_getxoptattr(xvattr_t *xvap)
294 {
295 xoptattr_t *xoap = NULL;
296 if (xvap->xva_vattr.va_mask & AT_XVATTR)
297 xoap = &xvap->xva_xoptattrs;
298 return (xoap);
299 }
300
301 // vska_compar
302 // create_vopstats_template
303 // new_vskstat
304 // vopstats_startup
305 // initialize_vopstats
306 // get_fstype_vopstats
307 // get_vskstat_anchor
308 // teardown_vopstats
309
310 /*
311 * Read or write a vnode. Called from kernel code.
312 */
/*
 * Perform one read or write on vp, honoring read-only file systems and
 * non-blocking mandatory (nbmand) locks.  Returns 0 or an errno.
 * If residp is NULL, a short transfer is converted to EIO; otherwise
 * the untransferred byte count is returned through residp.
 */
int
vn_rdwr(
	enum uio_rw rw,
	struct vnode *vp,
	caddr_t base,
	ssize_t len,
	offset_t offset,
	enum uio_seg seg,
	int ioflag,
	rlim64_t ulimit,	/* meaningful only if rw is UIO_WRITE */
	cred_t *cr,
	ssize_t *residp)
{
	struct uio uio;
	struct iovec iov;
	int error;
	int in_crit = 0;	/* nonzero once the nbmand crit region is held */

	/* Writes fail on a read-only fs (devices and FIFOs are exempt). */
	if (rw == UIO_WRITE && ISROFILE(vp))
		return (EROFS);

	if (len < 0)
		return (EIO);

	VOPXID_MAP_CR(vp, cr);

	/* Build a single-segment uio describing the caller's buffer. */
	iov.iov_base = base;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_loffset = offset;
	uio.uio_segflg = (short)seg;
	uio.uio_resid = len;
	uio.uio_llimit = ulimit;

	/*
	 * We have to enter the critical region before calling VOP_RWLOCK
	 * to avoid a deadlock with ufs.
	 */
	if (nbl_need_check(vp)) {
		int svmand;

		nbl_start_crit(vp, RW_READER);
		in_crit = 1;
		error = nbl_svmand(vp, cr, &svmand);
		if (error != 0)
			goto done;
		/* A conflicting nbmand lock denies the whole transfer. */
		if (nbl_conflict(vp, rw == UIO_WRITE ? NBL_WRITE : NBL_READ,
		    uio.uio_offset, uio.uio_resid, svmand, NULL)) {
			error = EACCES;
			goto done;
		}
	}

	(void) VOP_RWLOCK(vp,
	    rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE, NULL);
	if (rw == UIO_WRITE) {
		uio.uio_fmode = FWRITE;
		uio.uio_extflg = UIO_COPY_DEFAULT;
		error = VOP_WRITE(vp, &uio, ioflag, cr, NULL);
	} else {
		uio.uio_fmode = FREAD;
		uio.uio_extflg = UIO_COPY_CACHED;
		error = VOP_READ(vp, &uio, ioflag, cr, NULL);
	}
	VOP_RWUNLOCK(vp,
	    rw == UIO_WRITE ? V_WRITELOCK_TRUE : V_WRITELOCK_FALSE, NULL);

	/* Report residual to the caller, or map a short transfer to EIO. */
	if (residp)
		*residp = uio.uio_resid;
	else if (uio.uio_resid)
		error = EIO;

done:
	if (in_crit)
		nbl_end_crit(vp);
	return (error);
}
390
391 /*
392 * Incremend the hold on a vnode
393 * (Real kernel uses a macro)
394 */
void
vn_hold(struct vnode *vp)
{
	/* v_lock serializes v_count updates in this library. */
	mutex_enter(&vp->v_lock);
	(vp)->v_count++;
	mutex_exit(&vp->v_lock);
}
402
403 /*
404 * Release a vnode. Call VOP_INACTIVE on last reference or
405 * decrement reference count...
406 */
void
vn_rele(vnode_t *vp)
{
	VERIFY(vp->v_count > 0);
	mutex_enter(&vp->v_lock);
	if (vp->v_count == 1) {
		/*
		 * Last reference: v_lock must be dropped before
		 * VOP_INACTIVE, which takes over the final release.
		 */
		mutex_exit(&vp->v_lock);
		VOP_INACTIVE(vp, CRED(), NULL);
		return;
	}
	/* Not the last hold: just decrement under the lock. */
	VN_RELE_LOCKED(vp);
	mutex_exit(&vp->v_lock);
}
420
421 // vn_rele_dnlc
422 // vn_rele_stream
423 // vn_rele_inactive
424 // vn_rele_async
425 // vn_open, vn_openat
426 // vn_open_upgrade
427 // vn_open_downgrade
428 // vn_create, vn_createat
429 // vn_link, vn_linkat
430 // vn_rename, vn_renameat
431 // vn_remove, vn_removeat
432
433
434 /*
435 * Utility function to compare equality of vnodes.
436 * Compare the underlying real vnodes, if there are underlying vnodes.
437 * This is a more thorough comparison than the VN_CMP() macro provides.
438 */
439 int
vn_compare(vnode_t * vp1,vnode_t * vp2)440 vn_compare(vnode_t *vp1, vnode_t *vp2)
441 {
442 vnode_t *realvp;
443
444 if (vp1 != NULL && VOP_REALVP(vp1, &realvp, NULL) == 0)
445 vp1 = realvp;
446 if (vp2 != NULL && VOP_REALVP(vp2, &realvp, NULL) == 0)
447 vp2 = realvp;
448 return (VN_CMP(vp1, vp2));
449 }
450
451 // vn_vfslocks_buckets
452 // vn_vfslocks_getlock
453 // vn_vfslocks_rele
454
/*
 * Single global lock standing in for the kernel's per-vnode vfs entry
 * locks; every vn_vfs[rw]lock*() routine below serializes on it.
 * NOTE(review): relies on static zero-initialization; no explicit
 * rw_init() is visible in this chunk -- confirm elsewhere.
 */
static krwlock_t vfsentry_ve_lock;
456
457 /*
458 * vn_vfswlock_wait is used to implement a lock which is logically a
459 * writers lock protecting the v_vfsmountedhere field.
460 * vn_vfswlock_wait has been modified to be similar to vn_vfswlock,
461 * except that it blocks to acquire the lock VVFSLOCK.
462 *
463 * traverse() and routines re-implementing part of traverse (e.g. autofs)
464 * need to hold this lock. mount(), vn_rename(), vn_remove() and so on
465 * need the non-blocking version of the writers lock i.e. vn_vfswlock
466 */
467 int
vn_vfswlock_wait(vnode_t * vp)468 vn_vfswlock_wait(vnode_t *vp)
469 {
470
471 ASSERT(vp != NULL);
472
473 rw_enter(&vfsentry_ve_lock, RW_WRITER);
474
475 return (0);
476 }
477
478 int
vn_vfsrlock_wait(vnode_t * vp)479 vn_vfsrlock_wait(vnode_t *vp)
480 {
481
482 ASSERT(vp != NULL);
483
484 rw_enter(&vfsentry_ve_lock, RW_READER);
485
486 return (0);
487 }
488
489 /*
490 * vn_vfswlock is used to implement a lock which is logically a writers lock
491 * protecting the v_vfsmountedhere field.
492 */
493 int
vn_vfswlock(vnode_t * vp)494 vn_vfswlock(vnode_t *vp)
495 {
496
497 if (vp == NULL)
498 return (EBUSY);
499
500 if (rw_tryenter(&vfsentry_ve_lock, RW_WRITER))
501 return (0);
502
503 return (EBUSY);
504 }
505
506 int
vn_vfsrlock(vnode_t * vp)507 vn_vfsrlock(vnode_t *vp)
508 {
509
510 if (vp == NULL)
511 return (EBUSY);
512
513 if (rw_tryenter(&vfsentry_ve_lock, RW_READER))
514 return (0);
515
516 return (EBUSY);
517 }
518
/*
 * Release the (global) vfs entry lock taken by any of the
 * vn_vfs[rw]lock*() routines above; vp is unused here.
 */
void
vn_vfsunlock(vnode_t *vp)
{

	rw_exit(&vfsentry_ve_lock);
}
525
526 int
vn_vfswlock_held(vnode_t * vp)527 vn_vfswlock_held(vnode_t *vp)
528 {
529 int held;
530
531 ASSERT(vp != NULL);
532
533 held = rw_write_held(&vfsentry_ve_lock);
534
535 return (held);
536 }
537
538
539 int
vn_make_ops(const char * name,const fs_operation_def_t * templ,vnodeops_t ** actual)540 vn_make_ops(
541 const char *name, /* Name of file system */
542 const fs_operation_def_t *templ, /* Operation specification */
543 vnodeops_t **actual) /* Return the vnodeops */
544 {
545 int unused_ops;
546 int error;
547
548 *actual = (vnodeops_t *)kmem_alloc(sizeof (vnodeops_t), KM_SLEEP);
549
550 (*actual)->vnop_name = name;
551
552 error = fs_build_vector(*actual, &unused_ops, vn_ops_table, templ);
553 if (error) {
554 kmem_free(*actual, sizeof (vnodeops_t));
555 }
556
557 #if DEBUG
558 if (unused_ops != 0)
559 cmn_err(CE_WARN, "vn_make_ops: %s: %d operations supplied "
560 "but not used", name, unused_ops);
561 #endif
562
563 return (error);
564 }
565
566 /*
567 * Free the vnodeops created as a result of vn_make_ops()
568 */
569 void
vn_freevnodeops(vnodeops_t * vnops)570 vn_freevnodeops(vnodeops_t *vnops)
571 {
572 kmem_free(vnops, sizeof (vnodeops_t));
573 }
574
575 /*
576 * Vnode cache.
577 */
578
579 /* ARGSUSED */
580 static int
vn_cache_constructor(void * buf,void * cdrarg,int kmflags)581 vn_cache_constructor(void *buf, void *cdrarg, int kmflags)
582 {
583 struct vnode *vp = buf;
584
585 bzero(vp, sizeof (*vp));
586 mutex_init(&vp->v_lock, NULL, MUTEX_DEFAULT, NULL);
587 rw_init(&vp->v_nbllock, NULL, RW_DEFAULT, NULL);
588 vp->v_path = vn_vpath_empty;
589 vp->v_fd = -1;
590 vp->v_st_dev = NODEV;
591
592 return (0);
593 }
594
595 /* ARGSUSED */
596 static void
vn_cache_destructor(void * buf,void * cdrarg)597 vn_cache_destructor(void *buf, void *cdrarg)
598 {
599 struct vnode *vp;
600
601 vp = buf;
602
603 rw_destroy(&vp->v_nbllock);
604 mutex_destroy(&vp->v_lock);
605 }
606
/*
 * Create the global vnode kmem cache, wiring up the constructor and
 * destructor above.
 */
void
vn_create_cache(void)
{
	vn_cache = kmem_cache_create("vn_cache", sizeof (struct vnode),
	    VNODE_ALIGN, vn_cache_constructor, vn_cache_destructor, NULL, NULL,
	    NULL, 0);
}
614
/* Destroy the global vnode kmem cache created by vn_create_cache(). */
void
vn_destroy_cache(void)
{
	kmem_cache_destroy(vn_cache);
}
620
621 /*
622 * Used by file systems when fs-specific nodes (e.g., ufs inodes) are
623 * cached by the file system and vnodes remain associated.
624 */
void
vn_recycle(vnode_t *vp)
{
	VERIFY(vp->v_path != NULL);

	/*
	 * XXX - This really belongs in vn_reinit(), but we have some issues
	 * with the counts. Best to have it here for clean initialization.
	 */
	vp->v_rdcnt = 0;
	vp->v_wrcnt = 0;

	/*
	 * If FEM was in use...
	 */

	/* Release any allocated v_path and reset to the shared empty string. */
	if (vp->v_path != vn_vpath_empty) {
		kmem_free(vp->v_path, strlen(vp->v_path) + 1);
		vp->v_path = vn_vpath_empty;
	}
	// vsd_free(vp);
}
647
648 /*
649 * Used to reset the vnode fields including those that are directly accessible
650 * as well as those which require an accessor function.
651 */
void
vn_reinit(vnode_t *vp)
{
	vp->v_count = 1;	/* a fresh vnode starts with one hold */
	// vp->v_count_dnlc = 0;
	vp->v_vfsp = NULL;
	vp->v_stream = NULL;
	vp->v_vfsmountedhere = NULL;
	vp->v_flag = 0;
	vp->v_type = VNON;
	vp->v_rdev = NODEV;

	vp->v_xattrdir = NULL;

	/*
	 * In a few specific instances, vn_reinit() is used to initialize
	 * locally defined vnode_t instances. Lacking the construction offered
	 * by vn_alloc(), these vnodes require v_path initialization.
	 */
	if (vp->v_path == NULL) {
		vp->v_path = vn_vpath_empty;
	}

	/* Handles v_femhead, v_path, and the r/w/map counts */
	vn_recycle(vp);
}
678
679 vnode_t *
vn_alloc(int kmflag)680 vn_alloc(int kmflag)
681 {
682 vnode_t *vp;
683
684 vp = kmem_cache_alloc(vn_cache, kmflag);
685
686 if (vp != NULL) {
687 // vp->v_femhead = NULL; /* Must be done before vn_reinit() */
688 // vp->v_fopdata = NULL;
689 vn_reinit(vp);
690 }
691
692 return (vp);
693 }
694
void
vn_free(vnode_t *vp)
{
	extern vnode_t *rootdir;
	ASSERT(vp != rootdir);	/* the root vnode must never be freed */

	/*
	 * Some file systems call vn_free() with v_count of zero,
	 * some with v_count of 1. In any case, the value should
	 * never be anything else.
	 */
	ASSERT((vp->v_count == 0) || (vp->v_count == 1));
	VERIFY(vp->v_path != NULL);
	/* Free any allocated v_path before the vnode returns to the cache. */
	if (vp->v_path != vn_vpath_empty) {
		kmem_free(vp->v_path, strlen(vp->v_path) + 1);
		vp->v_path = vn_vpath_empty;
	}

	/* If FEM was in use... */

	// vsd_free(vp);
	kmem_cache_free(vn_cache, vp);
}
718
719 /*
720 * vnode status changes, should define better states than 1, 0.
721 */
722 void
vn_reclaim(vnode_t * vp)723 vn_reclaim(vnode_t *vp)
724 {
725 vfs_t *vfsp = vp->v_vfsp;
726
727 if (vfsp == NULL ||
728 vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
729 return;
730 }
731 (void) VFS_VNSTATE(vfsp, vp, VNTRANS_RECLAIMED);
732 }
733
734 void
vn_idle(vnode_t * vp)735 vn_idle(vnode_t *vp)
736 {
737 vfs_t *vfsp = vp->v_vfsp;
738
739 if (vfsp == NULL ||
740 vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
741 return;
742 }
743 (void) VFS_VNSTATE(vfsp, vp, VNTRANS_IDLED);
744 }
745 void
vn_exists(vnode_t * vp)746 vn_exists(vnode_t *vp)
747 {
748 vfs_t *vfsp = vp->v_vfsp;
749
750 if (vfsp == NULL ||
751 vfsp->vfs_implp == NULL || vfsp->vfs_femhead == NULL) {
752 return;
753 }
754 (void) VFS_VNSTATE(vfsp, vp, VNTRANS_EXISTS);
755 }
756
/*
 * No-op here (compare vn_reclaim/vn_idle/vn_exists above, which do
 * notify the vfs of state transitions).
 */
void
vn_invalid(vnode_t *vp)
{
}
761
762 /* Vnode event notification */
763 // vnevent_support()
764 // vnevent_...
765
766 /*
767 * Vnode accessors.
768 */
769
/*
 * Nonzero if the vnode's file system is mounted read-only.
 * (Returns the raw VFS_RDONLY flag bits, not a normalized boolean.)
 */
int
vn_is_readonly(vnode_t *vp)
{
	return (vp->v_vfsp->vfs_flag & VFS_RDONLY);
}
775
/*
 * Would return nonzero if the file had active file locks; this
 * library keeps no lock state, so it is always zero.
 */
int
vn_has_flocks(vnode_t *vp)
{
	return (0);
}
781
/*
 * Would return nonzero if the vnode had mandatory locking active;
 * always zero here (no mandatory-lock tracking in this library).
 */
int
vn_has_mandatory_locks(vnode_t *vp, int mode)
{
	return (0);
}
787
/*
 * Would return nonzero if the vnode had cached pages; always zero
 * here (no page cache in this library).
 */
int
vn_has_cached_data(vnode_t *vp)
{
	return (0);
}
793
794 // vn_can_change_zones
795
796 /*
797 * Return nonzero if the vnode is a mount point, zero if not.
798 */
/*
 * Return nonzero if the vnode is a mount point, zero if not.
 */
int
vn_ismntpt(vnode_t *vp)
{
	return (vp->v_vfsmountedhere != NULL);
}
804
805 /* Retrieve the vfs (if any) mounted on this vnode */
/* Retrieve the vfs (if any) mounted on this vnode; NULL when none. */
vfs_t *
vn_mountedvfs(vnode_t *vp)
{
	return (vp->v_vfsmountedhere);
}
811
812 /*
813 * Return nonzero if the vnode is referenced by the dnlc, zero if not.
814 * (no DNLC here)
815 */
/*
 * Would return nonzero if the vnode were referenced by the DNLC;
 * this library has no DNLC, so always zero.
 */
int
vn_in_dnlc(vnode_t *vp)
{
	return (0);
}
821
822
823 /*
824 * vn_has_other_opens() checks whether a particular file is opened by more than
825 * just the caller and whether the open is for read and/or write.
826 * This routine is for calling after the caller has already called VOP_OPEN()
827 * and the caller wishes to know if they are the only one with it open for
828 * the mode(s) specified.
829 *
830 * Vnode counts are only kept on regular files (v_type=VREG).
831 */
832 int
vn_has_other_opens(vnode_t * vp,v_mode_t mode)833 vn_has_other_opens(
834 vnode_t *vp,
835 v_mode_t mode)
836 {
837
838 ASSERT(vp != NULL);
839
840 switch (mode) {
841 case V_WRITE:
842 if (vp->v_wrcnt > 1)
843 return (V_TRUE);
844 break;
845 case V_RDORWR:
846 if ((vp->v_rdcnt > 1) || (vp->v_wrcnt > 1))
847 return (V_TRUE);
848 break;
849 case V_RDANDWR:
850 if ((vp->v_rdcnt > 1) && (vp->v_wrcnt > 1))
851 return (V_TRUE);
852 break;
853 case V_READ:
854 if (vp->v_rdcnt > 1)
855 return (V_TRUE);
856 break;
857 }
858
859 return (V_FALSE);
860 }
861
862 /*
863 * vn_is_opened() checks whether a particular file is opened and
864 * whether the open is for read and/or write.
865 *
866 * Vnode counts are only kept on regular files (v_type=VREG).
867 */
868 int
vn_is_opened(vnode_t * vp,v_mode_t mode)869 vn_is_opened(
870 vnode_t *vp,
871 v_mode_t mode)
872 {
873
874 ASSERT(vp != NULL);
875
876 switch (mode) {
877 case V_WRITE:
878 if (vp->v_wrcnt)
879 return (V_TRUE);
880 break;
881 case V_RDANDWR:
882 if (vp->v_rdcnt && vp->v_wrcnt)
883 return (V_TRUE);
884 break;
885 case V_RDORWR:
886 if (vp->v_rdcnt || vp->v_wrcnt)
887 return (V_TRUE);
888 break;
889 case V_READ:
890 if (vp->v_rdcnt)
891 return (V_TRUE);
892 break;
893 }
894
895 return (V_FALSE);
896 }
897
898 /*
899 * vn_is_mapped() checks whether a particular file is mapped and whether
900 * the file is mapped read and/or write. (no mmap here)
901 */
int
vn_is_mapped(
	vnode_t *vp,
	v_mode_t mode)
{
	/* No mmap support in this library, so nothing is ever mapped. */
	return (V_FALSE);
}
909
910 /*
911 * Set the operations vector for a vnode.
912 */
void
vn_setops(vnode_t *vp, vnodeops_t *vnodeops)
{
	/* Both arguments are required; a vnode always has an ops vector. */
	ASSERT(vp != NULL);
	ASSERT(vnodeops != NULL);

	vp->v_op = vnodeops;
}
922
923 /*
924 * Retrieve the operations vector for a vnode
925 */
/* Return the vnode's current operations vector. */
vnodeops_t *
vn_getops(vnode_t *vp)
{

	ASSERT(vp != NULL);

	return (vp->v_op);
}
934
935 /*
936 * Returns non-zero (1) if the vnodeops matches that of the vnode.
937 * Returns zero (0) if not.
938 */
int
vn_matchops(vnode_t *vp, vnodeops_t *vnodeops)
{
	/* Simple pointer comparison against the vnode's ops vector. */
	return (vn_getops(vp) == vnodeops);
}
944
945 // vn_matchopval
946 // fs_new_caller_id
947
948 // vn_clearpath
949 // vn_setpath_common
950
/* ARGSUSED */
/*
 * Stub: this library does not maintain v_path from lookup/create,
 * so path updates are a no-op.
 */
void
vn_updatepath(vnode_t *pvp, vnode_t *vp, const char *name)
{
}
956
957 // vn_setpath...
958 // vn_renamepath
959 // vn_copypath
960
961 // vn_vmpss_usepageio
962
963 /* VOP_XXX() macros call the corresponding fop_xxx() function */
964
/*
 * Wrapper for VOP_OPEN.  Maintains v_rdcnt/v_wrcnt on regular files,
 * including migrating the counts when the file system's open returns a
 * different vnode through *vpp.
 */
int
fop_open(
	vnode_t **vpp,
	int mode,
	cred_t *cr,
	caller_context_t *ct)
{
	int ret;
	vnode_t *vp = *vpp;	/* saved in case open changes *vpp */

	VN_HOLD(vp);
	/*
	 * Adding to the vnode counts before calling open
	 * avoids the need for a mutex...
	 */
	if ((*vpp)->v_type == VREG) {
		if (mode & FREAD)
			atomic_inc_32(&(*vpp)->v_rdcnt);
		if (mode & FWRITE)
			atomic_inc_32(&(*vpp)->v_wrcnt);
	}

	VOPXID_MAP_CR(vp, cr);

	ret = (*(*(vpp))->v_op->vop_open)(vpp, mode, cr, ct);

	if (ret) {
		/*
		 * Use the saved vp just in case the vnode ptr got trashed
		 * by the error.
		 */
		VOPSTATS_UPDATE(vp, open);
		/* Open failed: undo the optimistic count increments. */
		if ((vp->v_type == VREG) && (mode & FREAD))
			atomic_dec_32(&vp->v_rdcnt);
		if ((vp->v_type == VREG) && (mode & FWRITE))
			atomic_dec_32(&vp->v_wrcnt);
	} else {
		/*
		 * Some filesystems will return a different vnode,
		 * but the same path was still used to open it.
		 * So if we do change the vnode and need to
		 * copy over the path, do so here, rather than special
		 * casing each filesystem. Adjust the vnode counts to
		 * reflect the vnode switch.
		 */
		VOPSTATS_UPDATE(*vpp, open);
		if (*vpp != vp && *vpp != NULL) {
			/* Move the r/w counts from vp onto the new *vpp. */
			// vn_copypath(vp, *vpp);
			if (((*vpp)->v_type == VREG) && (mode & FREAD))
				atomic_inc_32(&(*vpp)->v_rdcnt);
			if ((vp->v_type == VREG) && (mode & FREAD))
				atomic_dec_32(&vp->v_rdcnt);
			if (((*vpp)->v_type == VREG) && (mode & FWRITE))
				atomic_inc_32(&(*vpp)->v_wrcnt);
			if ((vp->v_type == VREG) && (mode & FWRITE))
				atomic_dec_32(&vp->v_wrcnt);
		}
	}
	VN_RELE(vp);	/* drop the hold taken above */
	return (ret);
}
1026
/*
 * Wrapper for VOP_CLOSE.  On the last close (count == 1) of a regular
 * file, undoes the per-open r/w counts taken in fop_open().
 */
int
fop_close(
	vnode_t *vp,
	int flag,
	int count,
	offset_t offset,
	cred_t *cr,
	caller_context_t *ct)
{
	int err;

	VOPXID_MAP_CR(vp, cr);

	err = (*(vp)->v_op->vop_close)(vp, flag, count, offset, cr, ct);
	VOPSTATS_UPDATE(vp, close);
	/*
	 * Check passed in count to handle possible dups. Vnode counts are only
	 * kept on regular files
	 */
	if ((vp->v_type == VREG) && (count == 1)) {
		if (flag & FREAD) {
			ASSERT(vp->v_rdcnt > 0);
			atomic_dec_32(&vp->v_rdcnt);
		}
		if (flag & FWRITE) {
			ASSERT(vp->v_wrcnt > 0);
			atomic_dec_32(&vp->v_wrcnt);
		}
	}
	return (err);
}
1058
1059 int
fop_read(vnode_t * vp,uio_t * uiop,int ioflag,cred_t * cr,caller_context_t * ct)1060 fop_read(
1061 vnode_t *vp,
1062 uio_t *uiop,
1063 int ioflag,
1064 cred_t *cr,
1065 caller_context_t *ct)
1066 {
1067 int err;
1068 ssize_t resid_start = uiop->uio_resid;
1069
1070 VOPXID_MAP_CR(vp, cr);
1071
1072 err = (*(vp)->v_op->vop_read)(vp, uiop, ioflag, cr, ct);
1073 VOPSTATS_UPDATE_IO(vp, read,
1074 read_bytes, (resid_start - uiop->uio_resid));
1075 return (err);
1076 }
1077
1078 int
fop_write(vnode_t * vp,uio_t * uiop,int ioflag,cred_t * cr,caller_context_t * ct)1079 fop_write(
1080 vnode_t *vp,
1081 uio_t *uiop,
1082 int ioflag,
1083 cred_t *cr,
1084 caller_context_t *ct)
1085 {
1086 int err;
1087 ssize_t resid_start = uiop->uio_resid;
1088
1089 VOPXID_MAP_CR(vp, cr);
1090
1091 err = (*(vp)->v_op->vop_write)(vp, uiop, ioflag, cr, ct);
1092 VOPSTATS_UPDATE_IO(vp, write,
1093 write_bytes, (resid_start - uiop->uio_resid));
1094 return (err);
1095 }
1096
1097 int
fop_ioctl(vnode_t * vp,int cmd,intptr_t arg,int flag,cred_t * cr,int * rvalp,caller_context_t * ct)1098 fop_ioctl(
1099 vnode_t *vp,
1100 int cmd,
1101 intptr_t arg,
1102 int flag,
1103 cred_t *cr,
1104 int *rvalp,
1105 caller_context_t *ct)
1106 {
1107 int err;
1108
1109 VOPXID_MAP_CR(vp, cr);
1110
1111 err = (*(vp)->v_op->vop_ioctl)(vp, cmd, arg, flag, cr, rvalp, ct);
1112 VOPSTATS_UPDATE(vp, ioctl);
1113 return (err);
1114 }
1115
1116 int
fop_setfl(vnode_t * vp,int oflags,int nflags,cred_t * cr,caller_context_t * ct)1117 fop_setfl(
1118 vnode_t *vp,
1119 int oflags,
1120 int nflags,
1121 cred_t *cr,
1122 caller_context_t *ct)
1123 {
1124 int err;
1125
1126 VOPXID_MAP_CR(vp, cr);
1127
1128 err = (*(vp)->v_op->vop_setfl)(vp, oflags, nflags, cr, ct);
1129 VOPSTATS_UPDATE(vp, setfl);
1130 return (err);
1131 }
1132
/*
 * Wrapper for VOP_GETATTR.  Strips the xvattr bit for file systems
 * without xvattr support, and rejects ATTR_NOACLCHECK unless the fs
 * supports ACE-mask access checks.
 */
int
fop_getattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cr,
	caller_context_t *ct)
{
	int err;

	VOPXID_MAP_CR(vp, cr);

	/*
	 * If this file system doesn't understand the xvattr extensions
	 * then turn off the xvattr bit.
	 */
	if (vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR) == 0) {
		vap->va_mask &= ~AT_XVATTR;
	}

	/*
	 * We're only allowed to skip the ACL check iff we used a 32 bit
	 * ACE mask with VOP_ACCESS() to determine permissions.
	 */
	if ((flags & ATTR_NOACLCHECK) &&
	    vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
		return (EINVAL);
	}
	err = (*(vp)->v_op->vop_getattr)(vp, vap, flags, cr, ct);
	VOPSTATS_UPDATE(vp, getattr);
	return (err);
}
1165
/*
 * Wrapper for VOP_SETATTR.  Mirrors fop_getattr(): strips the xvattr
 * bit for file systems without xvattr support, and rejects
 * ATTR_NOACLCHECK unless the fs supports ACE-mask access checks.
 */
int
fop_setattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cr,
	caller_context_t *ct)
{
	int err;

	VOPXID_MAP_CR(vp, cr);

	/*
	 * If this file system doesn't understand the xvattr extensions
	 * then turn off the xvattr bit.
	 */
	if (vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR) == 0) {
		vap->va_mask &= ~AT_XVATTR;
	}

	/*
	 * We're only allowed to skip the ACL check iff we used a 32 bit
	 * ACE mask with VOP_ACCESS() to determine permissions.
	 */
	if ((flags & ATTR_NOACLCHECK) &&
	    vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
		return (EINVAL);
	}
	err = (*(vp)->v_op->vop_setattr)(vp, vap, flags, cr, ct);
	VOPSTATS_UPDATE(vp, setattr);
	return (err);
}
1198
1199 int
fop_access(vnode_t * vp,int mode,int flags,cred_t * cr,caller_context_t * ct)1200 fop_access(
1201 vnode_t *vp,
1202 int mode,
1203 int flags,
1204 cred_t *cr,
1205 caller_context_t *ct)
1206 {
1207 int err;
1208
1209 if ((flags & V_ACE_MASK) &&
1210 vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
1211 return (EINVAL);
1212 }
1213
1214 VOPXID_MAP_CR(vp, cr);
1215
1216 err = (*(vp)->v_op->vop_access)(vp, mode, flags, cr, ct);
1217 VOPSTATS_UPDATE(vp, access);
1218 return (err);
1219 }
1220
/*
 * Wrapper for VOP_LOOKUP.  Rejects FIGNORECASE on file systems that
 * advertise neither case-insensitive nor no-case-sensitive support,
 * then dispatches to the fs and updates v_path on success.
 */
int
fop_lookup(
	vnode_t *dvp,
	char *nm,
	vnode_t **vpp,
	pathname_t *pnp,
	int flags,
	vnode_t *rdir,
	cred_t *cr,
	caller_context_t *ct,
	int *deflags,		/* Returned per-dirent flags */
	pathname_t *ppnp)	/* Returned case-preserved name in directory */
{
	int ret;

	/*
	 * If this file system doesn't support case-insensitive access
	 * and said access is requested, fail quickly.  It is required
	 * that if the vfs supports case-insensitive lookup, it also
	 * supports extended dirent flags.
	 */
	if (flags & FIGNORECASE &&
	    (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
	    vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
		return (EINVAL);

	VOPXID_MAP_CR(dvp, cr);

	/*
	 * The real vnode.c would call xattr_dir_lookup here,
	 * which inserts the special "System Attribute" files:
	 * (SUNWattr_rw, SUNWattr_ro) into the xattr list.
	 * Here the main focus is on testing xattr support,
	 * so the system attribute stuff is omitted.
	 */
#if 0
	if ((flags & LOOKUP_XATTR) && (flags & LOOKUP_HAVE_SYSATTR_DIR) == 0) {
		// Don't need xattr support in libfksmbfs.
		// ret = xattr_dir_lookup(dvp, vpp, flags, cr);
		ret = EINVAL;
	} else
#endif
	{
		ret = (*(dvp)->v_op->vop_lookup)
		    (dvp, nm, vpp, pnp, flags, rdir, cr, ct, deflags, ppnp);
	}
	if (ret == 0 && *vpp) {
		VOPSTATS_UPDATE(*vpp, lookup);
		vn_updatepath(dvp, *vpp, nm);
	}

	return (ret);
}
1274
1275 int
fop_create(vnode_t * dvp,char * name,vattr_t * vap,vcexcl_t excl,int mode,vnode_t ** vpp,cred_t * cr,int flags,caller_context_t * ct,vsecattr_t * vsecp)1276 fop_create(
1277 vnode_t *dvp,
1278 char *name,
1279 vattr_t *vap,
1280 vcexcl_t excl,
1281 int mode,
1282 vnode_t **vpp,
1283 cred_t *cr,
1284 int flags,
1285 caller_context_t *ct,
1286 vsecattr_t *vsecp) /* ACL to set during create */
1287 {
1288 int ret;
1289
1290 if (vsecp != NULL &&
1291 vfs_has_feature(dvp->v_vfsp, VFSFT_ACLONCREATE) == 0) {
1292 return (EINVAL);
1293 }
1294 /*
1295 * If this file system doesn't support case-insensitive access
1296 * and said access is requested, fail quickly.
1297 */
1298 if (flags & FIGNORECASE &&
1299 (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
1300 vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
1301 return (EINVAL);
1302
1303 VOPXID_MAP_CR(dvp, cr);
1304
1305 ret = (*(dvp)->v_op->vop_create)
1306 (dvp, name, vap, excl, mode, vpp, cr, flags, ct, vsecp);
1307 if (ret == 0 && *vpp) {
1308 VOPSTATS_UPDATE(*vpp, create);
1309 vn_updatepath(dvp, *vpp, name);
1310 }
1311
1312 return (ret);
1313 }
1314
1315 int
fop_remove(vnode_t * dvp,char * nm,cred_t * cr,caller_context_t * ct,int flags)1316 fop_remove(
1317 vnode_t *dvp,
1318 char *nm,
1319 cred_t *cr,
1320 caller_context_t *ct,
1321 int flags)
1322 {
1323 int err;
1324
1325 /*
1326 * If this file system doesn't support case-insensitive access
1327 * and said access is requested, fail quickly.
1328 */
1329 if (flags & FIGNORECASE &&
1330 (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
1331 vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
1332 return (EINVAL);
1333
1334 VOPXID_MAP_CR(dvp, cr);
1335
1336 err = (*(dvp)->v_op->vop_remove)(dvp, nm, cr, ct, flags);
1337 VOPSTATS_UPDATE(dvp, remove);
1338 return (err);
1339 }
1340
1341 int
fop_link(vnode_t * tdvp,vnode_t * svp,char * tnm,cred_t * cr,caller_context_t * ct,int flags)1342 fop_link(
1343 vnode_t *tdvp,
1344 vnode_t *svp,
1345 char *tnm,
1346 cred_t *cr,
1347 caller_context_t *ct,
1348 int flags)
1349 {
1350 int err;
1351
1352 /*
1353 * If the target file system doesn't support case-insensitive access
1354 * and said access is requested, fail quickly.
1355 */
1356 if (flags & FIGNORECASE &&
1357 (vfs_has_feature(tdvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
1358 vfs_has_feature(tdvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
1359 return (EINVAL);
1360
1361 VOPXID_MAP_CR(tdvp, cr);
1362
1363 err = (*(tdvp)->v_op->vop_link)(tdvp, svp, tnm, cr, ct, flags);
1364 VOPSTATS_UPDATE(tdvp, link);
1365 return (err);
1366 }
1367
/*
 * Wrapper for VOP_RENAME: rename entry snm in sdvp to tnm in tdvp.
 *
 * NOTE(review): only the source directory's vfs is consulted for the
 * FIGNORECASE feature check, and the credential XID mapping uses tdvp
 * while the operation is dispatched through sdvp's vnodeops — this
 * looks intentional (it matches the structure of the other wrappers
 * here) but is worth confirming against the upstream fs/vnode.c.
 */
int
fop_rename(
	vnode_t *sdvp,
	char *snm,
	vnode_t *tdvp,
	char *tnm,
	cred_t *cr,
	caller_context_t *ct,
	int flags)
{
	int err;

	/*
	 * If the file system involved does not support
	 * case-insensitive access and said access is requested, fail
	 * quickly.
	 */
	if (flags & FIGNORECASE &&
	    ((vfs_has_feature(sdvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
	    vfs_has_feature(sdvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0)))
		return (EINVAL);

	VOPXID_MAP_CR(tdvp, cr);

	err = (*(sdvp)->v_op->vop_rename)(sdvp, snm, tdvp, tnm, cr, ct, flags);
	VOPSTATS_UPDATE(sdvp, rename);
	return (err);
}
1396
1397 int
fop_mkdir(vnode_t * dvp,char * dirname,vattr_t * vap,vnode_t ** vpp,cred_t * cr,caller_context_t * ct,int flags,vsecattr_t * vsecp)1398 fop_mkdir(
1399 vnode_t *dvp,
1400 char *dirname,
1401 vattr_t *vap,
1402 vnode_t **vpp,
1403 cred_t *cr,
1404 caller_context_t *ct,
1405 int flags,
1406 vsecattr_t *vsecp) /* ACL to set during create */
1407 {
1408 int ret;
1409
1410 if (vsecp != NULL &&
1411 vfs_has_feature(dvp->v_vfsp, VFSFT_ACLONCREATE) == 0) {
1412 return (EINVAL);
1413 }
1414 /*
1415 * If this file system doesn't support case-insensitive access
1416 * and said access is requested, fail quickly.
1417 */
1418 if (flags & FIGNORECASE &&
1419 (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
1420 vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
1421 return (EINVAL);
1422
1423 VOPXID_MAP_CR(dvp, cr);
1424
1425 ret = (*(dvp)->v_op->vop_mkdir)
1426 (dvp, dirname, vap, vpp, cr, ct, flags, vsecp);
1427 if (ret == 0 && *vpp) {
1428 VOPSTATS_UPDATE(*vpp, mkdir);
1429 vn_updatepath(dvp, *vpp, dirname);
1430 }
1431
1432 return (ret);
1433 }
1434
1435 int
fop_rmdir(vnode_t * dvp,char * nm,vnode_t * cdir,cred_t * cr,caller_context_t * ct,int flags)1436 fop_rmdir(
1437 vnode_t *dvp,
1438 char *nm,
1439 vnode_t *cdir,
1440 cred_t *cr,
1441 caller_context_t *ct,
1442 int flags)
1443 {
1444 int err;
1445
1446 /*
1447 * If this file system doesn't support case-insensitive access
1448 * and said access is requested, fail quickly.
1449 */
1450 if (flags & FIGNORECASE &&
1451 (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
1452 vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
1453 return (EINVAL);
1454
1455 VOPXID_MAP_CR(dvp, cr);
1456
1457 err = (*(dvp)->v_op->vop_rmdir)(dvp, nm, cdir, cr, ct, flags);
1458 VOPSTATS_UPDATE(dvp, rmdir);
1459 return (err);
1460 }
1461
1462 int
fop_readdir(vnode_t * vp,uio_t * uiop,cred_t * cr,int * eofp,caller_context_t * ct,int flags)1463 fop_readdir(
1464 vnode_t *vp,
1465 uio_t *uiop,
1466 cred_t *cr,
1467 int *eofp,
1468 caller_context_t *ct,
1469 int flags)
1470 {
1471 int err;
1472 ssize_t resid_start = uiop->uio_resid;
1473
1474 /*
1475 * If this file system doesn't support retrieving directory
1476 * entry flags and said access is requested, fail quickly.
1477 */
1478 if (flags & V_RDDIR_ENTFLAGS &&
1479 vfs_has_feature(vp->v_vfsp, VFSFT_DIRENTFLAGS) == 0)
1480 return (EINVAL);
1481
1482 VOPXID_MAP_CR(vp, cr);
1483
1484 err = (*(vp)->v_op->vop_readdir)(vp, uiop, cr, eofp, ct, flags);
1485 VOPSTATS_UPDATE_IO(vp, readdir,
1486 readdir_bytes, (resid_start - uiop->uio_resid));
1487 return (err);
1488 }
1489
/*
 * Wrapper for VOP_SYMLINK: create symbolic link "linkname" in dvp
 * whose contents are "target".
 */
int
fop_symlink(
	vnode_t *dvp,
	char *linkname,
	vattr_t *vap,
	char *target,
	cred_t *cr,
	caller_context_t *ct,
	int flags)
{
	int err;
	xvattr_t xvattr;	/* stack-local; may replace vap below */

	/*
	 * If this file system doesn't support case-insensitive access
	 * and said access is requested, fail quickly.
	 */
	if (flags & FIGNORECASE &&
	    (vfs_has_feature(dvp->v_vfsp, VFSFT_CASEINSENSITIVE) == 0 &&
	    vfs_has_feature(dvp->v_vfsp, VFSFT_NOCASESENSITIVE) == 0))
		return (EINVAL);

	VOPXID_MAP_CR(dvp, cr);

	/*
	 * check for reparse point: if the vfs supports reparse points
	 * and the target begins with the reparse tag, ask
	 * fs_reparse_mark() to build reparse attributes in the stack
	 * local xvattr and redirect vap at it (safe because vap is
	 * only used for the synchronous vop_symlink call below).
	 * NOTE(review): in this library fs_reparse_mark() is a stub
	 * that always returns nonzero, so the redirection never
	 * actually happens here.
	 */
	if ((vfs_has_feature(dvp->v_vfsp, VFSFT_REPARSE)) &&
	    (strncmp(target, FS_REPARSE_TAG_STR,
	    strlen(FS_REPARSE_TAG_STR)) == 0)) {
		if (!fs_reparse_mark(target, vap, &xvattr))
			vap = (vattr_t *)&xvattr;
	}

	err = (*(dvp)->v_op->vop_symlink)
	    (dvp, linkname, vap, target, cr, ct, flags);
	VOPSTATS_UPDATE(dvp, symlink);
	return (err);
}
1527
1528 int
fop_readlink(vnode_t * vp,uio_t * uiop,cred_t * cr,caller_context_t * ct)1529 fop_readlink(
1530 vnode_t *vp,
1531 uio_t *uiop,
1532 cred_t *cr,
1533 caller_context_t *ct)
1534 {
1535 int err;
1536
1537 VOPXID_MAP_CR(vp, cr);
1538
1539 err = (*(vp)->v_op->vop_readlink)(vp, uiop, cr, ct);
1540 VOPSTATS_UPDATE(vp, readlink);
1541 return (err);
1542 }
1543
1544 int
fop_fsync(vnode_t * vp,int syncflag,cred_t * cr,caller_context_t * ct)1545 fop_fsync(
1546 vnode_t *vp,
1547 int syncflag,
1548 cred_t *cr,
1549 caller_context_t *ct)
1550 {
1551 int err;
1552
1553 VOPXID_MAP_CR(vp, cr);
1554
1555 err = (*(vp)->v_op->vop_fsync)(vp, syncflag, cr, ct);
1556 VOPSTATS_UPDATE(vp, fsync);
1557 return (err);
1558 }
1559
1560 void
fop_inactive(vnode_t * vp,cred_t * cr,caller_context_t * ct)1561 fop_inactive(
1562 vnode_t *vp,
1563 cred_t *cr,
1564 caller_context_t *ct)
1565 {
1566 /* Need to update stats before vop call since we may lose the vnode */
1567 VOPSTATS_UPDATE(vp, inactive);
1568
1569 VOPXID_MAP_CR(vp, cr);
1570
1571 (*(vp)->v_op->vop_inactive)(vp, cr, ct);
1572 }
1573
1574 int
fop_fid(vnode_t * vp,fid_t * fidp,caller_context_t * ct)1575 fop_fid(
1576 vnode_t *vp,
1577 fid_t *fidp,
1578 caller_context_t *ct)
1579 {
1580 int err;
1581
1582 err = (*(vp)->v_op->vop_fid)(vp, fidp, ct);
1583 VOPSTATS_UPDATE(vp, fid);
1584 return (err);
1585 }
1586
1587 int
fop_rwlock(vnode_t * vp,int write_lock,caller_context_t * ct)1588 fop_rwlock(
1589 vnode_t *vp,
1590 int write_lock,
1591 caller_context_t *ct)
1592 {
1593 int ret;
1594
1595 ret = ((*(vp)->v_op->vop_rwlock)(vp, write_lock, ct));
1596 VOPSTATS_UPDATE(vp, rwlock);
1597 return (ret);
1598 }
1599
1600 void
fop_rwunlock(vnode_t * vp,int write_lock,caller_context_t * ct)1601 fop_rwunlock(
1602 vnode_t *vp,
1603 int write_lock,
1604 caller_context_t *ct)
1605 {
1606 (*(vp)->v_op->vop_rwunlock)(vp, write_lock, ct);
1607 VOPSTATS_UPDATE(vp, rwunlock);
1608 }
1609
1610 int
fop_seek(vnode_t * vp,offset_t ooff,offset_t * noffp,caller_context_t * ct)1611 fop_seek(
1612 vnode_t *vp,
1613 offset_t ooff,
1614 offset_t *noffp,
1615 caller_context_t *ct)
1616 {
1617 int err;
1618
1619 err = (*(vp)->v_op->vop_seek)(vp, ooff, noffp, ct);
1620 VOPSTATS_UPDATE(vp, seek);
1621 return (err);
1622 }
1623
1624 int
fop_cmp(vnode_t * vp1,vnode_t * vp2,caller_context_t * ct)1625 fop_cmp(
1626 vnode_t *vp1,
1627 vnode_t *vp2,
1628 caller_context_t *ct)
1629 {
1630 int err;
1631
1632 err = (*(vp1)->v_op->vop_cmp)(vp1, vp2, ct);
1633 VOPSTATS_UPDATE(vp1, cmp);
1634 return (err);
1635 }
1636
1637 int
fop_frlock(vnode_t * vp,int cmd,flock64_t * bfp,int flag,offset_t offset,struct flk_callback * flk_cbp,cred_t * cr,caller_context_t * ct)1638 fop_frlock(
1639 vnode_t *vp,
1640 int cmd,
1641 flock64_t *bfp,
1642 int flag,
1643 offset_t offset,
1644 struct flk_callback *flk_cbp,
1645 cred_t *cr,
1646 caller_context_t *ct)
1647 {
1648 int err;
1649
1650 VOPXID_MAP_CR(vp, cr);
1651
1652 err = (*(vp)->v_op->vop_frlock)
1653 (vp, cmd, bfp, flag, offset, flk_cbp, cr, ct);
1654 VOPSTATS_UPDATE(vp, frlock);
1655 return (err);
1656 }
1657
1658 int
fop_space(vnode_t * vp,int cmd,flock64_t * bfp,int flag,offset_t offset,cred_t * cr,caller_context_t * ct)1659 fop_space(
1660 vnode_t *vp,
1661 int cmd,
1662 flock64_t *bfp,
1663 int flag,
1664 offset_t offset,
1665 cred_t *cr,
1666 caller_context_t *ct)
1667 {
1668 int err;
1669
1670 VOPXID_MAP_CR(vp, cr);
1671
1672 err = (*(vp)->v_op->vop_space)(vp, cmd, bfp, flag, offset, cr, ct);
1673 VOPSTATS_UPDATE(vp, space);
1674 return (err);
1675 }
1676
1677 int
fop_realvp(vnode_t * vp,vnode_t ** vpp,caller_context_t * ct)1678 fop_realvp(
1679 vnode_t *vp,
1680 vnode_t **vpp,
1681 caller_context_t *ct)
1682 {
1683 int err;
1684
1685 err = (*(vp)->v_op->vop_realvp)(vp, vpp, ct);
1686 VOPSTATS_UPDATE(vp, realvp);
1687 return (err);
1688 }
1689
1690 int
fop_getpage(vnode_t * vp,offset_t off,size_t len,uint_t * protp,page_t ** plarr,size_t plsz,struct seg * seg,caddr_t addr,enum seg_rw rw,cred_t * cr,caller_context_t * ct)1691 fop_getpage(
1692 vnode_t *vp,
1693 offset_t off,
1694 size_t len,
1695 uint_t *protp,
1696 page_t **plarr,
1697 size_t plsz,
1698 struct seg *seg,
1699 caddr_t addr,
1700 enum seg_rw rw,
1701 cred_t *cr,
1702 caller_context_t *ct)
1703 {
1704 int err;
1705
1706 VOPXID_MAP_CR(vp, cr);
1707
1708 err = (*(vp)->v_op->vop_getpage)
1709 (vp, off, len, protp, plarr, plsz, seg, addr, rw, cr, ct);
1710 VOPSTATS_UPDATE(vp, getpage);
1711 return (err);
1712 }
1713
1714 int
fop_putpage(vnode_t * vp,offset_t off,size_t len,int flags,cred_t * cr,caller_context_t * ct)1715 fop_putpage(
1716 vnode_t *vp,
1717 offset_t off,
1718 size_t len,
1719 int flags,
1720 cred_t *cr,
1721 caller_context_t *ct)
1722 {
1723 int err;
1724
1725 VOPXID_MAP_CR(vp, cr);
1726
1727 err = (*(vp)->v_op->vop_putpage)(vp, off, len, flags, cr, ct);
1728 VOPSTATS_UPDATE(vp, putpage);
1729 return (err);
1730 }
1731
1732 int
fop_map(vnode_t * vp,offset_t off,struct as * as,caddr_t * addrp,size_t len,uchar_t prot,uchar_t maxprot,uint_t flags,cred_t * cr,caller_context_t * ct)1733 fop_map(
1734 vnode_t *vp,
1735 offset_t off,
1736 struct as *as,
1737 caddr_t *addrp,
1738 size_t len,
1739 uchar_t prot,
1740 uchar_t maxprot,
1741 uint_t flags,
1742 cred_t *cr,
1743 caller_context_t *ct)
1744 {
1745 int err;
1746
1747 VOPXID_MAP_CR(vp, cr);
1748
1749 err = (*(vp)->v_op->vop_map)
1750 (vp, off, as, addrp, len, prot, maxprot, flags, cr, ct);
1751 VOPSTATS_UPDATE(vp, map);
1752 return (err);
1753 }
1754
1755 int
fop_addmap(vnode_t * vp,offset_t off,struct as * as,caddr_t addr,size_t len,uchar_t prot,uchar_t maxprot,uint_t flags,cred_t * cr,caller_context_t * ct)1756 fop_addmap(
1757 vnode_t *vp,
1758 offset_t off,
1759 struct as *as,
1760 caddr_t addr,
1761 size_t len,
1762 uchar_t prot,
1763 uchar_t maxprot,
1764 uint_t flags,
1765 cred_t *cr,
1766 caller_context_t *ct)
1767 {
1768 int error;
1769
1770 VOPXID_MAP_CR(vp, cr);
1771
1772 error = (*(vp)->v_op->vop_addmap)
1773 (vp, off, as, addr, len, prot, maxprot, flags, cr, ct);
1774
1775 VOPSTATS_UPDATE(vp, addmap);
1776 return (error);
1777 }
1778
1779 int
fop_delmap(vnode_t * vp,offset_t off,struct as * as,caddr_t addr,size_t len,uint_t prot,uint_t maxprot,uint_t flags,cred_t * cr,caller_context_t * ct)1780 fop_delmap(
1781 vnode_t *vp,
1782 offset_t off,
1783 struct as *as,
1784 caddr_t addr,
1785 size_t len,
1786 uint_t prot,
1787 uint_t maxprot,
1788 uint_t flags,
1789 cred_t *cr,
1790 caller_context_t *ct)
1791 {
1792 int error;
1793
1794 VOPXID_MAP_CR(vp, cr);
1795
1796 error = (*(vp)->v_op->vop_delmap)
1797 (vp, off, as, addr, len, prot, maxprot, flags, cr, ct);
1798
1799 VOPSTATS_UPDATE(vp, delmap);
1800 return (error);
1801 }
1802
1803
1804 int
fop_poll(vnode_t * vp,short events,int anyyet,short * reventsp,struct pollhead ** phpp,caller_context_t * ct)1805 fop_poll(
1806 vnode_t *vp,
1807 short events,
1808 int anyyet,
1809 short *reventsp,
1810 struct pollhead **phpp,
1811 caller_context_t *ct)
1812 {
1813 int err;
1814
1815 err = (*(vp)->v_op->vop_poll)(vp, events, anyyet, reventsp, phpp, ct);
1816 VOPSTATS_UPDATE(vp, poll);
1817 return (err);
1818 }
1819
/*
 * Wrapper for VOP_DUMP: dump dblks blocks of data from addr starting
 * at logical block lbdn on vp (crash dump path).
 */
int
fop_dump(
	vnode_t *vp,
	caddr_t addr,
	offset_t lbdn,
	offset_t dblks,
	caller_context_t *ct)
{
	int err;

	/*
	 * ensure lbdn and dblks can be passed safely to bdev_dump:
	 * the downstream interface takes a daddr_t block number and an
	 * int count, so reject any value that the narrowing conversion
	 * would not round-trip.
	 */
	if ((lbdn != (daddr_t)lbdn) || (dblks != (int)dblks))
		return (EIO);

	err = (*(vp)->v_op->vop_dump)(vp, addr, lbdn, dblks, ct);
	VOPSTATS_UPDATE(vp, dump);
	return (err);
}
1838
1839 int
fop_pathconf(vnode_t * vp,int cmd,ulong_t * valp,cred_t * cr,caller_context_t * ct)1840 fop_pathconf(
1841 vnode_t *vp,
1842 int cmd,
1843 ulong_t *valp,
1844 cred_t *cr,
1845 caller_context_t *ct)
1846 {
1847 int err;
1848
1849 VOPXID_MAP_CR(vp, cr);
1850
1851 err = (*(vp)->v_op->vop_pathconf)(vp, cmd, valp, cr, ct);
1852 VOPSTATS_UPDATE(vp, pathconf);
1853 return (err);
1854 }
1855
1856 int
fop_pageio(vnode_t * vp,struct page * pp,u_offset_t io_off,size_t io_len,int flags,cred_t * cr,caller_context_t * ct)1857 fop_pageio(
1858 vnode_t *vp,
1859 struct page *pp,
1860 u_offset_t io_off,
1861 size_t io_len,
1862 int flags,
1863 cred_t *cr,
1864 caller_context_t *ct)
1865 {
1866 int err;
1867
1868 VOPXID_MAP_CR(vp, cr);
1869
1870 err = (*(vp)->v_op->vop_pageio)(vp, pp, io_off, io_len, flags, cr, ct);
1871 VOPSTATS_UPDATE(vp, pageio);
1872 return (err);
1873 }
1874
1875 int
fop_dumpctl(vnode_t * vp,int action,offset_t * blkp,caller_context_t * ct)1876 fop_dumpctl(
1877 vnode_t *vp,
1878 int action,
1879 offset_t *blkp,
1880 caller_context_t *ct)
1881 {
1882 int err;
1883 err = (*(vp)->v_op->vop_dumpctl)(vp, action, blkp, ct);
1884 VOPSTATS_UPDATE(vp, dumpctl);
1885 return (err);
1886 }
1887
1888 void
fop_dispose(vnode_t * vp,page_t * pp,int flag,int dn,cred_t * cr,caller_context_t * ct)1889 fop_dispose(
1890 vnode_t *vp,
1891 page_t *pp,
1892 int flag,
1893 int dn,
1894 cred_t *cr,
1895 caller_context_t *ct)
1896 {
1897 /* Must do stats first since it's possible to lose the vnode */
1898 VOPSTATS_UPDATE(vp, dispose);
1899
1900 VOPXID_MAP_CR(vp, cr);
1901
1902 (*(vp)->v_op->vop_dispose)(vp, pp, flag, dn, cr, ct);
1903 }
1904
/*
 * Wrapper for VOP_SETSECATTR: set security attributes (ACL) on vp.
 *
 * NOTE(review): unlike fop_getsecattr(), the XID credential mapping
 * here happens before the ATTR_NOACLCHECK feature check; this matches
 * the structure of the original fs/vnode.c — confirm before changing.
 */
int
fop_setsecattr(
	vnode_t *vp,
	vsecattr_t *vsap,
	int flag,
	cred_t *cr,
	caller_context_t *ct)
{
	int err;

	VOPXID_MAP_CR(vp, cr);

	/*
	 * We're only allowed to skip the ACL check iff we used a 32 bit
	 * ACE mask with VOP_ACCESS() to determine permissions.
	 */
	if ((flag & ATTR_NOACLCHECK) &&
	    vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
		return (EINVAL);
	}
	err = (*(vp)->v_op->vop_setsecattr) (vp, vsap, flag, cr, ct);
	VOPSTATS_UPDATE(vp, setsecattr);
	return (err);
}
1929
1930 int
fop_getsecattr(vnode_t * vp,vsecattr_t * vsap,int flag,cred_t * cr,caller_context_t * ct)1931 fop_getsecattr(
1932 vnode_t *vp,
1933 vsecattr_t *vsap,
1934 int flag,
1935 cred_t *cr,
1936 caller_context_t *ct)
1937 {
1938 int err;
1939
1940 /*
1941 * We're only allowed to skip the ACL check iff we used a 32 bit
1942 * ACE mask with VOP_ACCESS() to determine permissions.
1943 */
1944 if ((flag & ATTR_NOACLCHECK) &&
1945 vfs_has_feature(vp->v_vfsp, VFSFT_ACEMASKONACCESS) == 0) {
1946 return (EINVAL);
1947 }
1948
1949 VOPXID_MAP_CR(vp, cr);
1950
1951 err = (*(vp)->v_op->vop_getsecattr) (vp, vsap, flag, cr, ct);
1952 VOPSTATS_UPDATE(vp, getsecattr);
1953 return (err);
1954 }
1955
1956 int
fop_shrlock(vnode_t * vp,int cmd,struct shrlock * shr,int flag,cred_t * cr,caller_context_t * ct)1957 fop_shrlock(
1958 vnode_t *vp,
1959 int cmd,
1960 struct shrlock *shr,
1961 int flag,
1962 cred_t *cr,
1963 caller_context_t *ct)
1964 {
1965 int err;
1966
1967 VOPXID_MAP_CR(vp, cr);
1968
1969 err = (*(vp)->v_op->vop_shrlock)(vp, cmd, shr, flag, cr, ct);
1970 VOPSTATS_UPDATE(vp, shrlock);
1971 return (err);
1972 }
1973
1974 int
fop_vnevent(vnode_t * vp,vnevent_t vnevent,vnode_t * dvp,char * fnm,caller_context_t * ct)1975 fop_vnevent(vnode_t *vp, vnevent_t vnevent, vnode_t *dvp, char *fnm,
1976 caller_context_t *ct)
1977 {
1978 int err;
1979
1980 err = (*(vp)->v_op->vop_vnevent)(vp, vnevent, dvp, fnm, ct);
1981 VOPSTATS_UPDATE(vp, vnevent);
1982 return (err);
1983 }
1984
1985 // fop_reqzcbuf
1986 // fop_retzcbuf
1987
1988 // vsd_defaultdestructor
1989 // vsd_create, vsd_destroy
1990 // vsd_get, vsd_set
1991 // vsd_free, vsd_realloc
1992
/*
 * Stub of the kernel's fs_reparse_mark().  Always fails (nonzero
 * return), so fop_symlink() above never substitutes the reparse
 * xvattr for the caller's vap in this library.  The vap and xvattr
 * arguments are accepted but unused here.
 */
static int
fs_reparse_mark(char *target, vattr_t *vap, xvattr_t *xvattr)
{
	return (-1);
}
1998
/*
 * Function to check whether a symlink is a reparse point.
 * Return B_TRUE if it is a reparse point, else return B_FALSE
 */
boolean_t
vn_is_reparse(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
	xvattr_t xvattr;
	xoptattr_t *xoap;

	/*
	 * Only symlinks can be reparse points, and only file systems
	 * with extended-attribute (xvattr) support can report them.
	 */
	if ((vp->v_type != VLNK) ||
	    !(vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR)))
		return (B_FALSE);

	/* Request exactly the XAT_REPARSE optional attribute. */
	xva_init(&xvattr);
	xoap = xva_getxoptattr(&xvattr);
	ASSERT(xoap);
	XVA_SET_REQ(&xvattr, XAT_REPARSE);

	if (VOP_GETATTR(vp, &xvattr.xva_vattr, 0, cr, ct))
		return (B_FALSE);

	/*
	 * If the file system did not return xvattr data, or did not
	 * fill in XAT_REPARSE, treat it as "not a reparse point".
	 */
	if ((!(xvattr.xva_vattr.va_mask & AT_XVATTR)) ||
	    (!(XVA_ISSET_RTN(&xvattr, XAT_REPARSE))))
		return (B_FALSE);

	return (xoap->xoa_reparse ? B_TRUE : B_FALSE);
}
2027