1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. 24 * 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 26 * All rights reserved. 27 * 28 * Copyright 2017 Nexenta Systems, Inc. All rights reserved. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/thread.h>
#include <sys/t_lock.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vfs.h>
#include <sys/errno.h>
#include <sys/buf.h>
#include <sys/stat.h>
#include <sys/cred.h>
#include <sys/kmem.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/flock.h>
#include <sys/share.h>
#include <sys/cmn_err.h>
#include <sys/tiuser.h>
#include <sys/sysmacros.h>
#include <sys/callb.h>
#include <sys/acl.h>
#include <sys/kstat.h>
#include <sys/signal.h>
#include <sys/list.h>
#include <sys/zone.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>

#include <smbfs/smbfs.h>
#include <smbfs/smbfs_node.h>
#include <smbfs/smbfs_subr.h>

#ifdef _KERNEL
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#endif // _KERNEL

/*
 * True while the attributes cached in the smbnode are still fresh,
 * i.e. the current time has not yet reached r_attrtime (the expiry
 * time computed by smbfs_attrcache_fa below).
 */
#define	ATTRCACHE_VALID(vp)	(gethrtime() < VTOSMB(vp)->r_attrtime)

static int smbfs_getattr_cache(vnode_t *, smbfattr_t *);
static void smbfattr_to_vattr(vnode_t *, smbfattr_t *, vattr_t *);
static void smbfattr_to_xvattr(smbfattr_t *, vattr_t *);
static int smbfs_getattr_otw(vnode_t *, struct smbfattr *, cred_t *);


/*
 * The following code provides zone support in order to perform an action
 * for each smbfs mount in a zone.  This is also where we would add
 * per-zone globals and kernel threads for the smbfs module (since
 * they must be terminated by the shutdown callback).
 */

/*
 * Per-zone state for the smbfs module: the list of all smbfs mounts
 * in the zone, protected by smg_lock.
 */
struct smi_globals {
	kmutex_t	smg_lock;	/* lock protecting smg_list */
	list_t		smg_list;	/* list of SMBFS mounts in zone */
	boolean_t	smg_destructor_called;	/* zone destroy callback ran */
};
typedef struct smi_globals smi_globals_t;

/* Zone key used to find a zone's smi_globals_t. */
static zone_key_t smi_list_key;

/*
 * Attributes caching:
 *
 * Attributes are cached in the smbnode in struct vattr form.
 * There is a time associated with the cached attributes (r_attrtime)
 * which tells whether the attributes are valid.  The time is initialized
 * to the difference between current time and the modify time of the vnode
 * when new attributes are cached.  This allows the attributes for
 * files that have changed recently to be timed out sooner than for files
 * that have not changed for a long time.  There are minimum and maximum
 * timeout values that can be set per mount point.
 */

/*
 * Helper for smbfs_validate_caches: if some other thread is serializing
 * work on this node (r_serial set, and not us), wait for it to finish.
 *
 * Returns 0 once no serialized operation is in progress, or EINTR if
 * the wait was interrupted by a signal (only possible when the mount's
 * SMI_INT "interruptible" flag makes the wait signal-sensitive).
 */
int
smbfs_waitfor_purge_complete(vnode_t *vp)
{
	smbnode_t *np;
	k_sigset_t smask;

	np = VTOSMB(vp);
	if (np->r_serial != NULL && np->r_serial != curthread) {
		mutex_enter(&np->r_statelock);
		/* Let signals break the wait only on "intr" mounts. */
		sigintr(&smask, VTOSMI(vp)->smi_flags & SMI_INT);
		while (np->r_serial != NULL) {
			if (!cv_wait_sig(&np->r_cv, &np->r_statelock)) {
				/* Interrupted by a signal. */
				sigunintr(&smask);
				mutex_exit(&np->r_statelock);
				return (EINTR);
			}
		}
		sigunintr(&smask);
		mutex_exit(&np->r_statelock);
	}
	return (0);
}

/*
 * Validate caches by checking cached attributes.  If the cached
 * attributes have timed out, then get new attributes from the server.
 * As a side effect, this will do cache invalidation if the attributes
 * have changed.
 *
 * If the attributes have not timed out and if there is a cache
 * invalidation being done by some other thread, then wait until that
 * thread has completed the cache invalidation.
 */
int
smbfs_validate_caches(
	struct vnode *vp,
	cred_t *cr)
{
	struct smbfattr fa;
	int error;

	if (ATTRCACHE_VALID(vp)) {
		/* Attributes still fresh; just wait out any in-progress
		 * serialized operation on this node. */
		error = smbfs_waitfor_purge_complete(vp);
		if (error)
			return (error);
		return (0);
	}

	/*
	 * Cache expired: fetch fresh attributes, which also performs
	 * the cache check / invalidation as a side effect.
	 */
	return (smbfs_getattr_otw(vp, &fa, cr));
}

/*
 * Purge all of the various data caches.
 *
 * Here NFS also had a flags arg to control what gets flushed.
 * We only have the page cache, so no flags arg.
 */
/* ARGSUSED */
void
smbfs_purge_caches(struct vnode *vp, cred_t *cr)
{

	/*
	 * Here NFS has: Purge the DNLC for this vp,
	 * Clear any readdir state bits,
	 * the readlink response cache, ...
	 */

	/*
	 * Flush and invalidate the page cache (B_INVAL).
	 */
	if (vn_has_cached_data(vp)) {
		(void) VOP_PUTPAGE(vp, (u_offset_t)0, 0, B_INVAL, cr, NULL);
	}

	/*
	 * Here NFS has: Flush the readdir response cache.
	 * No readdir cache in smbfs.
	 */
}

/*
 * Here NFS has:
 * nfs_purge_rddir_cache()
 * nfs3_cache_post_op_attr()
 * nfs3_cache_post_op_vattr()
 * nfs3_cache_wcc_data()
 */

/*
 * Check the attribute cache to see if the new attributes match
 * those cached.  If they do, the various `data' caches are
 * considered to be good.  Otherwise, purge the cached data.
 */
static void
smbfs_cache_check(
	struct vnode *vp,
	struct smbfattr *fap,
	cred_t *cr)
{
	smbnode_t *np;
	int purge_data = 0;
	int purge_acl = 0;

	np = VTOSMB(vp);
	mutex_enter(&np->r_statelock);

	/*
	 * Compare with NFS macro: CACHE_VALID
	 * If the mtime or size has changed,
	 * purge cached data.
	 */
	if (np->r_attr.fa_mtime.tv_sec != fap->fa_mtime.tv_sec ||
	    np->r_attr.fa_mtime.tv_nsec != fap->fa_mtime.tv_nsec)
		purge_data = 1;
	if (np->r_attr.fa_size != fap->fa_size)
		purge_data = 1;

	/* A ctime change suggests metadata (e.g. ACL) changed too. */
	if (np->r_attr.fa_ctime.tv_sec != fap->fa_ctime.tv_sec ||
	    np->r_attr.fa_ctime.tv_nsec != fap->fa_ctime.tv_nsec)
		purge_acl = 1;

	if (purge_acl) {
		/*
		 * NOTE(review): stamping r_sectime with "now" appears to
		 * invalidate cached security info — confirm against the
		 * consumers of r_sectime.
		 */
		np->r_sectime = gethrtime();
	}

	mutex_exit(&np->r_statelock);

	/* Must not hold r_statelock across the page-cache flush. */
	if (purge_data)
		smbfs_purge_caches(vp, cr);
}

/*
 * Set attributes cache for given vnode using SMB fattr
 * and update the attribute cache timeout.
 *
 * Based on NFS: nfs_attrcache, nfs_attrcache_va
 */
void
smbfs_attrcache_fa(vnode_t *vp, struct smbfattr *fap)
{
	smbnode_t *np;
	smbmntinfo_t *smi;
	hrtime_t delta, now;
	u_offset_t newsize;
	vtype_t vtype, oldvt;
	mode_t mode;

	np = VTOSMB(vp);
	smi = VTOSMI(vp);

	/*
	 * We allow v_type to change, so set that here
	 * (and the mode, which depends on the type).
	 */
	if (fap->fa_attr & SMB_FA_DIR) {
		vtype = VDIR;
		mode = smi->smi_dmode;
	} else {
		vtype = VREG;
		mode = smi->smi_fmode;
	}

	mutex_enter(&np->r_statelock);
	now = gethrtime();

	/*
	 * Delta is the number of nanoseconds that we will
	 * cache the attributes of the file.  It is based on
	 * the number of nanoseconds since the last time that
	 * we detected a change.  The assumption is that files
	 * that changed recently are likely to change again.
	 * There is a minimum and a maximum for regular files
	 * and for directories which is enforced though.
	 *
	 * Using the time since last change was detected
	 * eliminates direct comparison or calculation
	 * using mixed client and server times.  SMBFS
	 * does not make any assumptions regarding the
	 * client and server clocks being synchronized.
	 */
	if (fap->fa_mtime.tv_sec != np->r_attr.fa_mtime.tv_sec ||
	    fap->fa_mtime.tv_nsec != np->r_attr.fa_mtime.tv_nsec ||
	    fap->fa_size != np->r_attr.fa_size)
		np->r_mtime = now;

	if ((smi->smi_flags & SMI_NOAC) || (vp->v_flag & VNOCACHE))
		delta = 0;	/* caching disabled: expire immediately */
	else {
		delta = now - np->r_mtime;
		/* Clamp to the per-mount min/max timeouts. */
		if (vtype == VDIR) {
			if (delta < smi->smi_acdirmin)
				delta = smi->smi_acdirmin;
			else if (delta > smi->smi_acdirmax)
				delta = smi->smi_acdirmax;
		} else {
			if (delta < smi->smi_acregmin)
				delta = smi->smi_acregmin;
			else if (delta > smi->smi_acregmax)
				delta = smi->smi_acregmax;
		}
	}

	np->r_attrtime = now + delta;
	np->r_attr = *fap;
	np->n_mode = mode;
	oldvt = vp->v_type;
	vp->v_type = vtype;

	/*
	 * Shall we update r_size? (local notion of size)
	 *
	 * The real criteria for updating r_size should be:
	 * if the file has grown on the server, or if
	 * the client has not modified the file.
	 *
	 * Also deal with the fact that SMB presents
	 * directories as having size=0.  Doing that
	 * here and leaving fa_size as returned OtW
	 * avoids fixing the size lots of places.
	 */
	newsize = fap->fa_size;
	if (vtype == VDIR && newsize < DEV_BSIZE)
		newsize = DEV_BSIZE;

	/*
	 * Only adopt the server's size when there is no locally
	 * cached dirty data that would be invalidated by it.
	 */
	if (np->r_size != newsize &&
	    (!vn_has_cached_data(vp) ||
	    (!(np->r_flags & RDIRTY) && np->r_count == 0))) {
		/* OK to set the size. */
		np->r_size = newsize;
	}

	/*
	 * Here NFS has:
	 * nfs_setswaplike(vp, va);
	 * np->r_flags &= ~RWRITEATTR;
	 * (not needed here)
	 */

	np->n_flag &= ~NATTRCHANGED;
	mutex_exit(&np->r_statelock);

	if (oldvt != vtype) {
		SMBVDEBUG("vtype change %d to %d\n", oldvt, vtype);
	}
}

/*
 * Fill in attribute from the cache.
 *
 * If valid, copy to *fap and return zero,
 * otherwise return an error.
 *
 * From NFS: nfs_getattr_cache()
 */
/*
 * NOTE(review): the forward declaration above marks this static, but the
 * definition omits the keyword (linkage stays internal per C rules).
 */
int
smbfs_getattr_cache(vnode_t *vp, struct smbfattr *fap)
{
	smbnode_t *np;
	int error;

	np = VTOSMB(vp);

	mutex_enter(&np->r_statelock);
	if (gethrtime() >= np->r_attrtime) {
		/* cache expired */
		error = ENOENT;
	} else {
		/* cache is valid */
		*fap = np->r_attr;
		error = 0;
	}
	mutex_exit(&np->r_statelock);

	return (error);
}

/*
 * Get attributes over-the-wire and update attributes cache
 * if no error occurred in the over-the-wire operation.
 * Return 0 if successful, otherwise error.
 * From NFS: nfs_getattr_otw
 */
static int
smbfs_getattr_otw(vnode_t *vp, struct smbfattr *fap, cred_t *cr)
{
	struct smbnode *np;
	struct smb_cred scred;
	int error;

	np = VTOSMB(vp);

	/*
	 * Here NFS uses the ACL RPC (if smi_flags & SMI_ACL)
	 * With SMB, getting the ACL is a significantly more
	 * expensive operation, so we do that only when asked
	 * for the uid/gid.  See smbfsgetattr().
	 */

	/* Shared lock for (possible) n_fid use. */
	if (smbfs_rw_enter_sig(&np->r_lkserlock, RW_READER, SMBINTR(vp)))
		return (EINTR);
	smb_credinit(&scred, cr);

	bzero(fap, sizeof (*fap));
	error = smbfs_smb_getfattr(np, fap, &scred);

	smb_credrele(&scred);
	smbfs_rw_exit(&np->r_lkserlock);

	if (error) {
		/* Here NFS has: PURGE_STALE_FH(error, vp, cr) */
		smbfs_attrcache_remove(np);
		if (error == ENOENT || error == ENOTDIR) {
			/*
			 * Getattr failed because the object was
			 * removed or renamed by another client.
			 * Remove any cached attributes under it.
			 */
			smbfs_attrcache_prune(np);
		}
		return (error);
	}

	/*
	 * Here NFS has: nfs_cache_fattr(vap, fa, vap, t, cr);
	 * which did: fattr_to_vattr, nfs_attr_cache.
	 * We cache the fattr form, so just do the
	 * cache check and store the attributes.
	 */
	smbfs_cache_check(vp, fap, cr);
	smbfs_attrcache_fa(vp, fap);

	return (0);
}

/*
 * Return either cached or remote attributes.  If we get remote attrs,
 * use them to check and invalidate caches, then cache the new attributes.
 *
 * From NFS: nfsgetattr()
 */
int
smbfsgetattr(vnode_t *vp, struct vattr *vap, cred_t *cr)
{
	struct smbfattr fa;
	smbmntinfo_t *smi;
	uint_t mask;
	int error;

	smi = VTOSMI(vp);

	ASSERT(curproc->p_zone == smi->smi_zone_ref.zref_zone);

	/*
	 * If asked for UID or GID, update n_uid, n_gid.
	 * (Fetching the SMB ACL is expensive, so it is done
	 * only when the caller actually wants the IDs.)
	 */
	mask = AT_ALL;
	if (vap->va_mask & (AT_UID | AT_GID)) {
		if (smi->smi_flags & SMI_ACL)
			(void) smbfs_acl_getids(vp, cr);
		/* else leave as set in make_smbnode */
	} else {
		mask &= ~(AT_UID | AT_GID);
	}

	/*
	 * If we've got cached attributes, just use them;
	 * otherwise go to the server to get attributes,
	 * which will update the cache in the process.
	 */
	error = smbfs_getattr_cache(vp, &fa);
	if (error)
		error = smbfs_getattr_otw(vp, &fa, cr);
	if (error)
		return (error);
	vap->va_mask |= mask;

	/*
	 * Re. client's view of the file size, see:
	 * smbfs_attrcache_fa, smbfs_getattr_otw
	 */
	smbfattr_to_vattr(vp, &fa, vap);
	if (vap->va_mask & AT_XVATTR)
		smbfattr_to_xvattr(&fa, vap);

	return (0);
}


/*
 * Convert SMB over the wire attributes to vnode form.
 * (Void: unlike the NFS original, no error is possible here.)
 * From NFS: nattr_to_vattr()
 */
/*
 * NOTE(review): declared static in the prototype at the top of the file;
 * the definition omits the keyword (linkage remains internal).
 */
void
smbfattr_to_vattr(vnode_t *vp, struct smbfattr *fa, struct vattr *vap)
{
	struct smbnode *np = VTOSMB(vp);

	/*
	 * Take type, mode, uid, gid from the smbfs node,
	 * which have been updated by _getattr_otw.
	 */
	vap->va_type = vp->v_type;
	vap->va_mode = np->n_mode;

	vap->va_uid = np->n_uid;
	vap->va_gid = np->n_gid;

	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_nodeid = np->n_ino;
	vap->va_nlink = 1;

	/*
	 * Difference from NFS here: We cache attributes as
	 * reported by the server, so r_attr.fa_size is the
	 * server's idea of the file size.  This is called
	 * for getattr, so we want to return the client's
	 * idea of the file size.  NFS deals with that in
	 * nfsgetattr(), the equivalent of our caller.
	 */
	vap->va_size = np->r_size;

	/*
	 * Times.  Note, already converted from NT to
	 * Unix form (in the unmarshalling code).
	 */
	vap->va_atime = fa->fa_atime;
	vap->va_mtime = fa->fa_mtime;
	vap->va_ctime = fa->fa_ctime;

	/*
	 * rdev, blksize, seq are made up.
	 * va_nblocks is 512 byte blocks.
	 */
	vap->va_rdev = vp->v_rdev;
	vap->va_blksize = MAXBSIZE;
	vap->va_nblocks = (fsblkcnt64_t)btod(np->r_attr.fa_allocsz);
	vap->va_seq = 0;
}

/*
 * smbfattr_to_xvattr: like smbfattr_to_vattr but for
 * Extensible system attributes (PSARC 2007/315).
 * Only fills in attributes the caller explicitly requested.
 */
static void
smbfattr_to_xvattr(struct smbfattr *fa, struct vattr *vap)
{
	xvattr_t *xvap = (xvattr_t *)vap;	/* *vap may be xvattr_t */
	xoptattr_t *xoap = NULL;

	/* Nothing to do unless the caller passed a real xvattr. */
	if ((xoap = xva_getxoptattr(xvap)) == NULL)
		return;

	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
		xoap->xoa_createtime = fa->fa_createtime;
		XVA_SET_RTN(xvap, XAT_CREATETIME);
	}

	/* DOS attribute bits map directly onto the xoptattr booleans. */
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
		xoap->xoa_archive =
		    ((fa->fa_attr & SMB_FA_ARCHIVE) != 0);
		XVA_SET_RTN(xvap, XAT_ARCHIVE);
	}

	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
		xoap->xoa_system =
		    ((fa->fa_attr & SMB_FA_SYSTEM) != 0);
		XVA_SET_RTN(xvap, XAT_SYSTEM);
	}

	if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
		xoap->xoa_readonly =
		    ((fa->fa_attr & SMB_FA_RDONLY) != 0);
		XVA_SET_RTN(xvap, XAT_READONLY);
	}

	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
		xoap->xoa_hidden =
		    ((fa->fa_attr & SMB_FA_HIDDEN) != 0);
		XVA_SET_RTN(xvap, XAT_HIDDEN);
	}
}

/*
 * Here NFS has:
 * nfs_async_... stuff
 * which we're not using (no async I/O), and:
 * writerp(),
 * nfs_putpages()
 * nfs_invalidate_pages()
 * which we have in smbfs_vnops.c, and
 * nfs_printfhandle()
 * nfs_write_error()
 * not needed here.
 */

/*
 * Helper function for smbfs_sync
 *
 * Walk the per-zone list of smbfs mounts, calling smbfs_rflush
 * on each one.  This is a little tricky because we need to exit
 * the list mutex before each _rflush call and then try to resume
 * where we were in the list after re-entering the mutex.
 */
void
smbfs_flushall(cred_t *cr)
{
	smi_globals_t *smg;
	smbmntinfo_t *tmp_smi, *cur_smi, *next_smi;

	smg = zone_getspecific(smi_list_key, crgetzone(cr));
	ASSERT(smg != NULL);

	mutex_enter(&smg->smg_lock);
	cur_smi = list_head(&smg->smg_list);
	if (cur_smi == NULL) {
		/* No smbfs mounts in this zone. */
		mutex_exit(&smg->smg_lock);
		return;
	}
	/* Hold the VFS so it can't go away while we flush unlocked. */
	VFS_HOLD(cur_smi->smi_vfsp);
	mutex_exit(&smg->smg_lock);

flush:
	/* Called without smg_lock held; may block. */
	smbfs_rflush(cur_smi->smi_vfsp, cr);

	mutex_enter(&smg->smg_lock);
	/*
	 * Resume after cur_smi if that's still on the list,
	 * otherwise restart at the head.
	 */
	for (tmp_smi = list_head(&smg->smg_list);
	    tmp_smi != NULL;
	    tmp_smi = list_next(&smg->smg_list, tmp_smi))
		if (tmp_smi == cur_smi)
			break;
	if (tmp_smi != NULL)
		next_smi = list_next(&smg->smg_list, tmp_smi);
	else
		next_smi = list_head(&smg->smg_list);

	/* Hold the next mount before dropping our hold on the current. */
	if (next_smi != NULL)
		VFS_HOLD(next_smi->smi_vfsp);
	VFS_RELE(cur_smi->smi_vfsp);

	mutex_exit(&smg->smg_lock);

	if (next_smi != NULL) {
		cur_smi = next_smi;
		goto flush;
	}
}

/*
 * SMB Client initialization and cleanup.
 * Much of it is per-zone now.
 */


/*
 * Zone-create callback: allocate and initialize this zone's
 * smi_globals_t (empty mount list).  Returned pointer becomes the
 * zone-specific data for smi_list_key.
 */
/* ARGSUSED */
static void *
smbfs_zone_init(zoneid_t zoneid)
{
	smi_globals_t *smg;

	smg = kmem_alloc(sizeof (*smg), KM_SLEEP);
	mutex_init(&smg->smg_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&smg->smg_list, sizeof (smbmntinfo_t),
	    offsetof(smbmntinfo_t, smi_zone_node));
	smg->smg_destructor_called = B_FALSE;
	return (smg);
}

/*
 * Callback routine to tell all SMBFS mounts in the zone to stop creating new
 * threads.  Existing threads should exit.
 */
/* ARGSUSED */
static void
smbfs_zone_shutdown(zoneid_t zoneid, void *data)
{
	smi_globals_t *smg = data;
	smbmntinfo_t *smi;

	ASSERT(smg != NULL);
again:
	mutex_enter(&smg->smg_lock);
	for (smi = list_head(&smg->smg_list); smi != NULL;
	    smi = list_next(&smg->smg_list, smi)) {

		/*
		 * If we've done the shutdown work for this FS, skip.
		 * Once we go off the end of the list, we're done.
		 */
		if (smi->smi_flags & SMI_DEAD)
			continue;

		/*
		 * We will do work, so not done.  Get a hold on the FS.
		 */
		VFS_HOLD(smi->smi_vfsp);

		mutex_enter(&smi->smi_lock);
		smi->smi_flags |= SMI_DEAD;
		mutex_exit(&smi->smi_lock);

		/*
		 * Drop lock and release FS, which may change list, then repeat.
		 * We're done when every mi has been done or the list is empty.
		 * (Restarting the scan is safe because SMI_DEAD entries are
		 * skipped on the next pass.)
		 */
		mutex_exit(&smg->smg_lock);
		VFS_RELE(smi->smi_vfsp);
		goto again;
	}
	mutex_exit(&smg->smg_lock);
}

/*
 * Tear down a zone's smi_globals_t.  Caller guarantees no other
 * thread can still reference it.
 */
static void
smbfs_zone_free_globals(smi_globals_t *smg)
{
	list_destroy(&smg->smg_list);	/* makes sure the list is empty */
	mutex_destroy(&smg->smg_lock);
	kmem_free(smg, sizeof (*smg));

}

/*
 * Zone-destroy callback.  If mounts are still on the list (waiting
 * for VFS_FREEVFS), defer the free to smbfs_zonelist_remove().
 */
/* ARGSUSED */
static void
smbfs_zone_destroy(zoneid_t zoneid, void *data)
{
	smi_globals_t *smg = data;

	ASSERT(smg != NULL);
	mutex_enter(&smg->smg_lock);
	if (list_head(&smg->smg_list) != NULL) {
		/* Still waiting for VFS_FREEVFS() */
		smg->smg_destructor_called = B_TRUE;
		mutex_exit(&smg->smg_lock);
		return;
	}
	smbfs_zone_free_globals(smg);
}

/*
 * Add an SMBFS mount to the per-zone list of SMBFS mounts.
 */
void
smbfs_zonelist_add(smbmntinfo_t *smi)
{
	smi_globals_t *smg;

	smg = zone_getspecific(smi_list_key, smi->smi_zone_ref.zref_zone);
	mutex_enter(&smg->smg_lock);
	list_insert_head(&smg->smg_list, smi);
	mutex_exit(&smg->smg_lock);
}

/*
 * Remove an SMBFS mount from the per-zone list of SMBFS mounts.
 */
void
smbfs_zonelist_remove(smbmntinfo_t *smi)
{
	smi_globals_t *smg;

	smg = zone_getspecific(smi_list_key, smi->smi_zone_ref.zref_zone);
	mutex_enter(&smg->smg_lock);
	list_remove(&smg->smg_list, smi);
	/*
	 * We can be called asynchronously by VFS_FREEVFS() after the zone
	 * shutdown/destroy callbacks have executed; if so, clean up the zone's
	 * smi_globals.  No mutex_exit on this path: the globals (including
	 * the mutex itself) are being freed and nothing else references them.
	 */
	if (list_head(&smg->smg_list) == NULL &&
	    smg->smg_destructor_called == B_TRUE) {
		smbfs_zone_free_globals(smg);
		return;
	}
	mutex_exit(&smg->smg_lock);
}

#ifdef lint
#define	NEED_SMBFS_CALLBACKS	1
#endif

#ifdef NEED_SMBFS_CALLBACKS
/*
 * Call-back hooks for netsmb, in case we want them.
 * Apple's VFS wants them.  We may not need them.
 */
/*
 * Called by netsmb when a share connection is lost.
 * Currently a stub — see the TODO-style comment in the body.
 */
/*ARGSUSED*/
static void smbfs_dead(smb_share_t *ssp)
{
	/*
	 * Walk the mount list, finding all mounts
	 * using this share...
	 */
}

/* Placeholder for callbacks we register but do not act on. */
/*ARGSUSED*/
static void smbfs_cb_nop(smb_share_t *ss)
{
	/* no-op */
}

/* Callback table handed to netsmb via smb_fscb_set(). */
smb_fscb_t smbfs_cb = {
	.fscb_disconn	= smbfs_dead,
	.fscb_connect	= smbfs_cb_nop,
	.fscb_down	= smbfs_cb_nop,
	.fscb_up	= smbfs_cb_nop };

#endif /* NEED_SMBFS_CALLBACKS */

/*
 * SMBFS Client initialization routine.  This routine should only be called
 * once.  It performs the following tasks:
 *	- Initialize all global locks
 *	- Call sub-initialization routines (localize access to variables)
 */
int
smbfs_clntinit(void)
{

	zone_key_create(&smi_list_key, smbfs_zone_init, smbfs_zone_shutdown,
	    smbfs_zone_destroy);
#ifdef NEED_SMBFS_CALLBACKS
	(void) smb_fscb_set(&smbfs_cb);
#endif /* NEED_SMBFS_CALLBACKS */
	return (0);
}

/*
 * This routine is called when the modunload is called.  This will cleanup
 * the previously allocated/initialized nodes.
 */
void
smbfs_clntfini(void)
{
#ifdef NEED_SMBFS_CALLBACKS
	(void) smb_fscb_set(NULL);
#endif /* NEED_SMBFS_CALLBACKS */
	(void) zone_key_delete(smi_list_key);
}