xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_conn.c (revision 40c0e2317898b8c774791bdc2b30bd50111ab1fa)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_conn.c,v 1.27.166.1 2005/05/27 02:35:29 lindak Exp $
33  */
34 /*
35  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
36  * Use is subject to license terms.
37  *
38  * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
39  */
40 
41 /*
42  * Connection engine.
43  */
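
/*
 * Overview (editorial note, derived from the code in this file):
 *
 * Connection state lives in a two-level tree of reference-counted
 * "connection objects" (struct smb_connobj).  The static smb_vclist
 * below is the root (level SMBL_SM); its children are sessions
 * (struct smb_vc, level SMBL_VC), and each session's children are
 * shares (struct smb_share, level SMBL_SHARE).  The smb_co_*
 * functions implement the hold/release, parent/child linkage, and
 * teardown (SMBO_GONE) logic common to both object types.
 */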
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kmem.h>
48 #include <sys/proc.h>
49 #include <sys/lock.h>
50 #include <sys/vnode.h>
51 #include <sys/stream.h>
52 #include <sys/stropts.h>
53 #include <sys/socketvar.h>
54 #include <sys/cred.h>
55 #include <netinet/in.h>
56 #include <inet/ip.h>
57 #include <inet/ip6.h>
58 #include <sys/cmn_err.h>
59 #include <sys/thread.h>
60 #include <sys/atomic.h>
61 #include <sys/u8_textprep.h>
62 
63 #include <netsmb/smb_osdep.h>
64 
65 #include <netsmb/smb.h>
66 #include <netsmb/smb_conn.h>
67 #include <netsmb/smb_subr.h>
68 #include <netsmb/smb_tran.h>
69 #include <netsmb/smb_pass.h>
70 
71 static struct smb_connobj smb_vclist;
72 
73 void smb_co_init(struct smb_connobj *cp, int level, char *objname);
74 void smb_co_done(struct smb_connobj *cp);
75 void smb_co_hold(struct smb_connobj *cp);
76 void smb_co_rele(struct smb_connobj *cp);
77 void smb_co_kill(struct smb_connobj *cp);
78 
79 static void smb_vc_free(struct smb_connobj *cp);
80 static void smb_vc_gone(struct smb_connobj *cp);
81 
82 static void smb_share_free(struct smb_connobj *cp);
83 static void smb_share_gone(struct smb_connobj *cp);
84 
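/*
 * One-time setup for the connection layer: initialize the root
 * connection object (smb_vclist) under which all sessions hang.
 * smb_sm_idle() and smb_sm_done() below are the matching
 * "may we tear down?" check and the teardown itself.
 * (The call sites are outside this file.)
 */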
85 int
86 smb_sm_init(void)
87 {
88 	smb_co_init(&smb_vclist, SMBL_SM, "smbsm");
89 	return (0);
90 }
91 
92 int
93 smb_sm_idle(void)
94 {
95 	int error = 0;
96 	SMB_CO_LOCK(&smb_vclist);
97 	if (smb_vclist.co_usecount > 1) {
98 		SMBSDEBUG("%d connections still active\n",
99 		    smb_vclist.co_usecount - 1);
100 		error = EBUSY;
101 	}
102 	SMB_CO_UNLOCK(&smb_vclist);
103 	return (error);
104 }
105 
106 void
107 smb_sm_done(void)
108 {
109 	/*
110 	 * Why are we not iterating on smb_vclist here?
111 	 * Because the caller has just called smb_sm_idle() to
112 	 * make sure we have no VCs before calling this.
113 	 */
114 	smb_co_done(&smb_vclist);
115 }
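
/*
 * Illustrative teardown ordering for the two functions above
 * (hypothetical caller sketch, not part of the original file):
 *
 *	error = smb_sm_idle();
 *	if (error != 0)
 *		return (error);		refuse: VCs still active
 *	smb_sm_done();			safe: smb_vclist has no children
 */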
116 
117 
118 
119 /*
120  * Common code for connection object
121  */
122 /*ARGSUSED*/
123 void
124 smb_co_init(struct smb_connobj *cp, int level, char *objname)
125 {
126 
 127 	mutex_init(&cp->co_lock, objname, MUTEX_DRIVER, NULL);
128 
129 	cp->co_level = level;
130 	cp->co_usecount = 1;
131 	SLIST_INIT(&cp->co_children);
132 }
133 
134 /*
135  * Called just before free of an object
136  * of which smb_connobj is a part, i.e.
137  * _vc_free, _share_free, also sm_done.
138  */
139 void
140 smb_co_done(struct smb_connobj *cp)
141 {
142 	ASSERT(SLIST_EMPTY(&cp->co_children));
143 	mutex_destroy(&cp->co_lock);
144 }
145 
146 static void
147 smb_co_addchild(
148 	struct smb_connobj *parent,
149 	struct smb_connobj *child)
150 {
151 
152 	/*
153 	 * Set the child's pointer to the parent.
154 	 * No references yet, so no need to lock.
155 	 */
156 	ASSERT(child->co_usecount == 1);
157 	child->co_parent = parent;
158 
159 	/*
160 	 * Add the child to the parent's list of
161 	 * children, and in-line smb_co_hold
162 	 */
163 	ASSERT(MUTEX_HELD(&parent->co_lock));
164 	parent->co_usecount++;
165 	SLIST_INSERT_HEAD(&parent->co_children, child, co_next);
166 }
167 
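/*
 * Take another reference on a connection object.  Every smb_co_hold()
 * (or in-lined co_usecount++) must be balanced by an smb_co_rele();
 * the object is freed when the count that started at 1 in
 * smb_co_init() reaches zero in smb_co_rele() below.
 */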
168 void
169 smb_co_hold(struct smb_connobj *cp)
170 {
171 	SMB_CO_LOCK(cp);
172 	cp->co_usecount++;
173 	SMB_CO_UNLOCK(cp);
174 }
175 
176 /*
177  * Called via smb_vc_rele, smb_share_rele
178  */
179 void
180 smb_co_rele(struct smb_connobj *co)
181 {
182 	struct smb_connobj *parent;
183 	int old_flags;
184 
185 	SMB_CO_LOCK(co);
186 	if (co->co_usecount > 1) {
187 		co->co_usecount--;
188 		SMB_CO_UNLOCK(co);
189 		return;
190 	}
191 	ASSERT(co->co_usecount == 1);
192 	co->co_usecount = 0;
193 
194 	/*
195 	 * This list of children should be empty now.
196 	 * Check this while we're still linked, so
197 	 * we have a better chance of debugging.
198 	 */
199 	ASSERT(SLIST_EMPTY(&co->co_children));
200 
201 	/*
202 	 * OK, this element is going away.
203 	 *
204 	 * We need to drop the lock on this CO so we can take the
205 	 * parent CO lock. The _GONE flag prevents this CO from
206 	 * getting new references before we can unlink it from the
207 	 * parent list.
208 	 *
209 	 * The _GONE flag is also used to ensure that the co_gone
210 	 * function is called only once.  Note that smb_co_kill may
211 	 * do this before we get here.  If we find that the _GONE
212 	 * flag was not already set, then call the co_gone hook
213 	 * (smb_share_gone, smb_vc_gone) which will disconnect
214 	 * the share or the VC, respectively.
215 	 *
216 	 * Note the old: smb_co_gone(co, scred);
217 	 * is now in-line here.
218 	 */
219 	old_flags = co->co_flags;
220 	co->co_flags |= SMBO_GONE;
221 	SMB_CO_UNLOCK(co);
222 
223 	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
224 		co->co_gone(co);
225 
226 	/*
227 	 * If we have a parent (only smb_vclist does not)
228 	 * then unlink from parent's list of children.
229 	 * We have the only reference to the child.
230 	 */
231 	parent = co->co_parent;
232 	if (parent) {
233 		SMB_CO_LOCK(parent);
234 		ASSERT(SLIST_FIRST(&parent->co_children));
235 		if (SLIST_FIRST(&parent->co_children)) {
236 			SLIST_REMOVE(&parent->co_children, co,
237 			    smb_connobj, co_next);
238 		}
239 		SMB_CO_UNLOCK(parent);
240 	}
241 
242 	/*
243 	 * Now it's safe to free the CO
244 	 */
245 	if (co->co_free) {
246 		co->co_free(co);
247 	}
248 
249 	/*
250 	 * Finally, if the CO had a parent, decrement
251 	 * the parent's hold count for the lost child.
252 	 */
253 	if (parent) {
254 		/*
255 		 * Recursive call here (easier for debugging).
256 		 * Can only go two levels.
257 		 */
258 		smb_co_rele(parent);
259 	}
260 }
261 
262 /*
263  * Do just the first part of what co_gone does,
264  * i.e. tree disconnect, or disconnect a VC.
265  * This is used to forcibly close things.
266  */
267 void
268 smb_co_kill(struct smb_connobj *co)
269 {
270 	int old_flags;
271 
272 	SMB_CO_LOCK(co);
273 	old_flags = co->co_flags;
274 	co->co_flags |= SMBO_GONE;
275 	SMB_CO_UNLOCK(co);
276 
277 	/*
278 	 * Do the same "call only once" logic here as in
279 	 * smb_co_rele, though it's probably not possible
280 	 * for this to be called after smb_co_rele.
281 	 */
282 	if ((old_flags & SMBO_GONE) == 0 && co->co_gone)
283 		co->co_gone(co);
284 
285 	/* XXX: Walk list of children and kill those too? */
286 }
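
/*
 * Relationship between smb_co_kill() and smb_co_rele() (editorial
 * note): kill forces the disconnect (co_gone) but does not drop a
 * reference, so a forced teardown by a caller holding a reference
 * looks like this (illustrative sketch):
 *
 *	smb_co_kill(co);	disconnect now, mark SMBO_GONE
 *	smb_co_rele(co);	then drop the caller's hold
 */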
287 
288 
289 /*
290  * Session objects, which are referred to as "VC" for
291  * "virtual cirtuit". This has nothing to do with the
292  * CIFS notion of a "virtual cirtuit".  See smb_conn.h
293  */
294 
295 void
296 smb_vc_hold(struct smb_vc *vcp)
297 {
298 	smb_co_hold(VCTOCP(vcp));
299 }
300 
301 void
302 smb_vc_rele(struct smb_vc *vcp)
303 {
304 	smb_co_rele(VCTOCP(vcp));
305 }
306 
307 void
308 smb_vc_kill(struct smb_vc *vcp)
309 {
310 	smb_co_kill(VCTOCP(vcp));
311 }
312 
313 /*
314  * Normally called via smb_vc_rele()
315  * after co_usecount drops to zero.
316  * Also called via: smb_vc_kill()
317  *
 318  * Shut down the VC to this server and
 319  * invalidate the shares linked with it.
320  */
321 /*ARGSUSED*/
322 static void
323 smb_vc_gone(struct smb_connobj *cp)
324 {
325 	struct smb_vc *vcp = CPTOVC(cp);
326 
327 	/*
328 	 * Was smb_vc_disconnect(vcp);
329 	 */
330 	smb_iod_disconnect(vcp);
331 }
332 
333 /*
334  * The VC has no more references.  Free it.
335  * No locks needed here.
336  */
337 static void
338 smb_vc_free(struct smb_connobj *cp)
339 {
340 	struct smb_vc *vcp = CPTOVC(cp);
341 
342 	/*
343 	 * The _gone call should have emptied the request list,
344 	 * but let's make sure, as requests may have references
345 	 * to this VC without taking a hold.  (The hold is the
346 	 * responsibility of threads placing requests.)
347 	 */
348 	ASSERT(vcp->iod_rqlist.tqh_first == NULL);
349 
350 	if (vcp->vc_tdata)
351 		SMB_TRAN_DONE(vcp);
352 
353 /*
 354  * The iconv routines are not used here, so this block is commented out for now.
355  * REVISIT.
356  */
357 #ifdef NOTYETDEFINED
358 	if (vcp->vc_tolower)
359 		iconv_close(vcp->vc_tolower);
360 	if (vcp->vc_toupper)
361 		iconv_close(vcp->vc_toupper);
362 	if (vcp->vc_tolocal)
363 		iconv_close(vcp->vc_tolocal);
364 	if (vcp->vc_toserver)
365 		iconv_close(vcp->vc_toserver);
366 #endif
367 
368 	if (vcp->vc_mackey != NULL)
369 		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
370 	if (vcp->vc_ssnkey != NULL)
371 		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
372 
373 	cv_destroy(&vcp->iod_idle);
374 	rw_destroy(&vcp->iod_rqlock);
375 	sema_destroy(&vcp->vc_sendlock);
376 	cv_destroy(&vcp->vc_statechg);
377 	smb_co_done(VCTOCP(vcp));
378 	kmem_free(vcp, sizeof (*vcp));
379 }
380 
381 /*ARGSUSED*/
382 int
383 smb_vc_create(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
384 {
385 	static char objtype[] = "smb_vc";
386 	cred_t *cr = scred->scr_cred;
387 	struct smb_vc *vcp;
388 	int error = 0;
389 
390 	ASSERT(MUTEX_HELD(&smb_vclist.co_lock));
391 
392 	vcp = kmem_zalloc(sizeof (struct smb_vc), KM_SLEEP);
393 
394 	smb_co_init(VCTOCP(vcp), SMBL_VC, objtype);
395 	vcp->vc_co.co_free = smb_vc_free;
396 	vcp->vc_co.co_gone = smb_vc_gone;
397 
398 	cv_init(&vcp->vc_statechg, objtype, CV_DRIVER, NULL);
399 	sema_init(&vcp->vc_sendlock, 1, objtype, SEMA_DRIVER, NULL);
400 	rw_init(&vcp->iod_rqlock, objtype, RW_DRIVER, NULL);
401 	cv_init(&vcp->iod_idle, objtype, CV_DRIVER, NULL);
402 
403 	/* Expanded TAILQ_HEAD_INITIALIZER */
404 	vcp->iod_rqlist.tqh_last = &vcp->iod_rqlist.tqh_first;
405 
406 	/* A brand new VC should connect. */
407 	vcp->vc_state = SMBIOD_ST_RECONNECT;
408 
409 	/*
410 	 * These identify the connection.
411 	 */
412 	vcp->vc_zoneid = getzoneid();
413 	bcopy(ossn, &vcp->vc_ssn, sizeof (*ossn));
414 
415 	/* This fills in vcp->vc_tdata */
416 	vcp->vc_tdesc = &smb_tran_nbtcp_desc;
417 	if ((error = SMB_TRAN_CREATE(vcp, cr)) != 0)
418 		goto errout;
419 
420 	/* Success! */
421 	smb_co_addchild(&smb_vclist, VCTOCP(vcp));
422 	*vcpp = vcp;
423 	return (0);
424 
425 errout:
426 	/*
427 	 * This will destroy the new vc.
428 	 * See: smb_vc_free
429 	 */
430 	smb_vc_rele(vcp);
431 	return (error);
432 }
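
/*
 * Note on the errout path above (editorial): the brand-new VC
 * already has co_usecount == 1 from smb_co_init(), so a single
 * smb_vc_rele() is enough to run smb_vc_gone() and smb_vc_free()
 * and undo the partial construction.
 */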
433 
434 /*
435  * Find or create a VC identified by the info in ossn
436  * and return it with a "hold", but not locked.
437  */
438 /*ARGSUSED*/
439 int
440 smb_vc_findcreate(smbioc_ossn_t *ossn, smb_cred_t *scred, smb_vc_t **vcpp)
441 {
442 	struct smb_connobj *co;
443 	struct smb_vc *vcp;
444 	smbioc_ssn_ident_t *vc_id;
445 	int error;
446 	zoneid_t zoneid = getzoneid();
447 
448 	*vcpp = vcp = NULL;
449 
450 	SMB_CO_LOCK(&smb_vclist);
451 
452 	/* var, head, next_field */
453 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
454 		vcp = CPTOVC(co);
455 
456 		/*
457 		 * Some things we can check without
458 		 * holding the lock (those that are
459 		 * set at creation and never change).
460 		 */
461 
 462 		/* VCs in other zones are invisible. */
463 		if (vcp->vc_zoneid != zoneid)
464 			continue;
465 
466 		/* Also segregate by Unix owner. */
467 		if (vcp->vc_owner != ossn->ssn_owner)
468 			continue;
469 
470 		/*
471 		 * Compare identifying info:
472 		 * server address, user, domain
473 		 * names are case-insensitive
474 		 */
475 		vc_id = &vcp->vc_ssn.ssn_id;
476 		if (bcmp(&vc_id->id_srvaddr,
477 		    &ossn->ssn_id.id_srvaddr,
478 		    sizeof (vc_id->id_srvaddr)))
479 			continue;
480 		if (u8_strcmp(vc_id->id_user, ossn->ssn_id.id_user, 0,
481 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
482 			continue;
483 		if (u8_strcmp(vc_id->id_domain, ossn->ssn_id.id_domain, 0,
484 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
485 			continue;
486 
487 		/*
488 		 * We have a match, but still have to check
489 		 * the _GONE flag, and do that with a lock.
490 		 * No new references when _GONE is set.
491 		 *
492 		 * Also clear SMBVOPT_CREATE which the caller
493 		 * may check to find out if we did create.
494 		 */
495 		SMB_VC_LOCK(vcp);
496 		if ((vcp->vc_flags & SMBV_GONE) == 0) {
497 			ossn->ssn_vopt &= ~SMBVOPT_CREATE;
498 			/*
499 			 * Return it held, unlocked.
500 			 * In-line smb_vc_hold here.
501 			 */
502 			co->co_usecount++;
503 			SMB_VC_UNLOCK(vcp);
504 			*vcpp = vcp;
505 			error = 0;
506 			goto out;
507 		}
508 		SMB_VC_UNLOCK(vcp);
509 		/* keep looking. */
510 	}
511 	vcp = NULL;
512 
513 	/* Note: smb_vclist is still locked. */
514 
515 	if (ossn->ssn_vopt & SMBVOPT_CREATE) {
516 		/*
517 		 * Create a new VC.  It starts out with
518 		 * hold count = 1, so don't incr. here.
519 		 */
520 		error = smb_vc_create(ossn, scred, &vcp);
521 		if (error == 0)
522 			*vcpp = vcp;
523 	} else
524 		error = ENOENT;
525 
526 out:
527 	SMB_CO_UNLOCK(&smb_vclist);
528 	return (error);
529 }
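
/*
 * Illustrative caller pattern for smb_vc_findcreate() (hypothetical
 * sketch, not part of the original file).  SMBVOPT_CREATE in
 * ssn_vopt allows creation; on return it remains set only when a
 * new VC was actually created, because a successful lookup clears it:
 *
 *	ossn->ssn_vopt |= SMBVOPT_CREATE;
 *	error = smb_vc_findcreate(ossn, scred, &vcp);
 *	if (error == 0) {
 *		if (ossn->ssn_vopt & SMBVOPT_CREATE) {
 *			... new VC: still needs to be connected ...
 *		}
 *		... use the VC ...
 *		smb_vc_rele(vcp);
 *	}
 */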
530 
531 
532 /*
533  * Helper functions that operate on VCs
534  */
535 
536 /*
537  * Get a pointer to the IP address suitable for passing to Trusted
538  * Extensions find_tpc() routine.  Used by smbfs_mount_label_policy().
539  * Compare this code to nfs_mount_label_policy() if problems arise.
540  */
541 void *
542 smb_vc_getipaddr(struct smb_vc *vcp, int *ipvers)
543 {
544 	smbioc_ssn_ident_t *id = &vcp->vc_ssn.ssn_id;
545 	void *ret;
546 
547 	switch (id->id_srvaddr.sa.sa_family) {
548 	case AF_INET:
549 		*ipvers = IPV4_VERSION;
550 		ret = &id->id_srvaddr.sin.sin_addr;
551 		break;
552 
553 	case AF_INET6:
554 		*ipvers = IPV6_VERSION;
555 		ret = &id->id_srvaddr.sin6.sin6_addr;
556 		break;
557 	default:
558 		SMBSDEBUG("invalid address family %d\n",
559 		    id->id_srvaddr.sa.sa_family);
560 		*ipvers = 0;
561 		ret = NULL;
562 		break;
563 	}
564 	return (ret);
565 }
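
/*
 * Illustrative use of smb_vc_getipaddr() (hypothetical sketch,
 * mirroring the Trusted Extensions check mentioned above):
 *
 *	int ipvers;
 *	void *addr = smb_vc_getipaddr(vcp, &ipvers);
 *	if (addr == NULL)
 *		return (EACCES);	unsupported address family
 *	... pass addr and ipvers to find_tpc(), as in
 *	    nfs_mount_label_policy() ...
 */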
566 
567 void
568 smb_vc_walkshares(struct smb_vc *vcp,
569 	walk_share_func_t func)
570 {
571 	smb_connobj_t *co;
572 	smb_share_t *ssp;
573 
574 	/*
575 	 * Walk the share list calling func(ssp, arg)
576 	 */
577 	SMB_VC_LOCK(vcp);
578 	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
579 		ssp = CPTOSS(co);
580 		SMB_SS_LOCK(ssp);
581 		func(ssp);
582 		SMB_SS_UNLOCK(ssp);
583 	}
584 	SMB_VC_UNLOCK(vcp);
585 }
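
/*
 * Example use of smb_vc_walkshares() (illustrative): when a
 * connection drops, every share under the dead VC can be marked
 * invalid so stale file handles are detected later.  Note that
 * smb_share_invalidate() below expects ss_lock held, which is
 * exactly what the walker provides around each callback.
 *
 *	smb_vc_walkshares(vcp, smb_share_invalidate);
 */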
586 
587 
588 /*
589  * Share implementation
590  */
591 
592 void
593 smb_share_hold(struct smb_share *ssp)
594 {
595 	smb_co_hold(SSTOCP(ssp));
596 }
597 
598 void
599 smb_share_rele(struct smb_share *ssp)
600 {
601 	smb_co_rele(SSTOCP(ssp));
602 }
603 
604 void
605 smb_share_kill(struct smb_share *ssp)
606 {
607 	smb_co_kill(SSTOCP(ssp));
608 }
609 
610 /*
611  * Normally called via smb_share_rele()
612  * after co_usecount drops to zero.
613  * Also called via: smb_share_kill()
614  */
615 static void
616 smb_share_gone(struct smb_connobj *cp)
617 {
618 	struct smb_cred scred;
619 	struct smb_share *ssp = CPTOSS(cp);
620 
621 	smb_credinit(&scred, NULL);
622 	smb_iod_shutdown_share(ssp);
623 	(void) smb_smb_treedisconnect(ssp, &scred);
624 	smb_credrele(&scred);
625 }
626 
627 /*
628  * Normally called via smb_share_rele()
629  * after co_usecount drops to zero.
630  */
631 static void
632 smb_share_free(struct smb_connobj *cp)
633 {
634 	struct smb_share *ssp = CPTOSS(cp);
635 
636 	cv_destroy(&ssp->ss_conn_done);
637 	smb_co_done(SSTOCP(ssp));
638 	kmem_free(ssp, sizeof (*ssp));
639 }
640 
641 /*
 642  * Allocate a share structure and attach it to the given VC.
 643  * The connection (VC) is expected to be locked on entry.  The new
 644  * share is returned holding its initial reference; it is not locked.
645  */
646 /*ARGSUSED*/
647 int
648 smb_share_create(smbioc_tcon_t *tcon, struct smb_vc *vcp,
649 	struct smb_share **sspp, struct smb_cred *scred)
650 {
651 	static char objtype[] = "smb_ss";
652 	struct smb_share *ssp;
653 
654 	ASSERT(MUTEX_HELD(&vcp->vc_lock));
655 
656 	ssp = kmem_zalloc(sizeof (struct smb_share), KM_SLEEP);
657 	smb_co_init(SSTOCP(ssp), SMBL_SHARE, objtype);
658 	ssp->ss_co.co_free = smb_share_free;
659 	ssp->ss_co.co_gone = smb_share_gone;
660 
661 	cv_init(&ssp->ss_conn_done, objtype, CV_DRIVER, NULL);
662 	ssp->ss_tid = SMB_TID_UNKNOWN;
663 
664 	bcopy(&tcon->tc_sh, &ssp->ss_ioc,
665 	    sizeof (smbioc_oshare_t));
666 
667 	smb_co_addchild(VCTOCP(vcp), SSTOCP(ssp));
668 	*sspp = ssp;
669 
670 	return (0);
671 }
672 
673 /*
674  * Find or create a share under the given VC
675  * and return it with a "hold", but not locked.
676  */
677 
678 int
679 smb_share_findcreate(smbioc_tcon_t *tcon, struct smb_vc *vcp,
680 	struct smb_share **sspp, struct smb_cred *scred)
681 {
682 	struct smb_connobj *co;
683 	struct smb_share *ssp = NULL;
684 	int error = 0;
685 
686 	*sspp = NULL;
687 
688 	SMB_VC_LOCK(vcp);
689 
690 	/* var, head, next_field */
691 	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
692 		ssp = CPTOSS(co);
693 
694 		/* Share name */
695 		if (u8_strcmp(ssp->ss_name, tcon->tc_sh.sh_name, 0,
696 		    U8_STRCMP_CI_LOWER, U8_UNICODE_LATEST, &error))
697 			continue;
698 
699 		/*
700 		 * We have a match, but still have to check
701 		 * the _GONE flag, and do that with a lock.
702 		 * No new references when _GONE is set.
703 		 *
704 		 * Also clear SMBSOPT_CREATE which the caller
705 		 * may check to find out if we did create.
706 		 */
707 		SMB_SS_LOCK(ssp);
708 		if ((ssp->ss_flags & SMBS_GONE) == 0) {
709 			tcon->tc_opt &= ~SMBSOPT_CREATE;
710 			/*
711 			 * Return it held, unlocked.
712 			 * In-line smb_share_hold here.
713 			 */
714 			co->co_usecount++;
715 			SMB_SS_UNLOCK(ssp);
716 			*sspp = ssp;
717 			error = 0;
718 			goto out;
719 		}
720 		SMB_SS_UNLOCK(ssp);
721 		/* keep looking. */
722 	}
723 	ssp = NULL;
724 
725 	/* Note: vcp (list of shares) is still locked. */
726 
727 	if (tcon->tc_opt & SMBSOPT_CREATE) {
728 		/*
729 		 * Create a new share.  It starts out with
730 		 * hold count = 1, so don't incr. here.
731 		 */
732 		error = smb_share_create(tcon, vcp, &ssp, scred);
733 		if (error == 0)
734 			*sspp = ssp;
735 	} else
736 		error = ENOENT;
737 
738 out:
739 	SMB_VC_UNLOCK(vcp);
740 	return (error);
741 }
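
/*
 * Editorial note: the caller-side pattern mirrors smb_vc_findcreate()
 * above.  Set SMBSOPT_CREATE in tc_opt to allow creation, and test
 * the flag after a successful return to learn whether a new share
 * was created (a successful lookup clears it).
 */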
742 
743 
744 /*
745  * Helper functions that operate on shares
746  */
747 
748 /*
749  * Mark this share as invalid, so consumers will know
750  * their file handles have become invalid.
751  *
752  * Most share consumers store a copy of ss_vcgenid when
753  * opening a file handle and compare that with what's in
754  * the share before using a file handle.  If the genid
755  * doesn't match, the file handle has become "stale"
756  * due to disconnect.  Therefore, zap ss_vcgenid here.
757  */
758 void
759 smb_share_invalidate(struct smb_share *ssp)
760 {
761 
762 	ASSERT(MUTEX_HELD(&ssp->ss_lock));
763 
764 	ssp->ss_flags &= ~SMBS_CONNECTED;
765 	ssp->ss_tid = SMB_TID_UNKNOWN;
766 	ssp->ss_vcgenid = 0;
767 }
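
/*
 * Consumer-side sketch of the generation check described above
 * (hypothetical; "fid_vcgenid" stands for whatever copy of
 * ss_vcgenid the consumer saved when it opened its handle):
 *
 *	if ((ssp->ss_flags & SMBS_CONNECTED) == 0 ||
 *	    fid_vcgenid != ssp->ss_vcgenid)
 *		return (ESTALE);	handle is stale, must reopen
 */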
768 
769 /*
770  * Connect (or reconnect) a share object.
771  *
772  * Called by smb_usr_get_tree() for new connections,
773  * and called by smb_rq_enqueue() for reconnect.
774  */
775 int
776 smb_share_tcon(smb_share_t *ssp, smb_cred_t *scred)
777 {
778 	clock_t tmo;
779 	int error;
780 
781 	SMB_SS_LOCK(ssp);
782 
783 	if (ssp->ss_flags & SMBS_CONNECTED) {
 784 		SMBIODEBUG("already connected?");
785 		error = 0;
786 		goto out;
787 	}
788 
789 	/*
790 	 * Wait for completion of any state changes
791 	 * that might be underway.
792 	 */
793 	while (ssp->ss_flags & SMBS_RECONNECTING) {
794 		ssp->ss_conn_waiters++;
795 		tmo = cv_wait_sig(&ssp->ss_conn_done, &ssp->ss_lock);
796 		ssp->ss_conn_waiters--;
797 		if (tmo == 0) {
798 			/* Interrupt! */
799 			error = EINTR;
800 			goto out;
801 		}
802 	}
803 
804 	/* Did someone else do it for us? */
805 	if (ssp->ss_flags & SMBS_CONNECTED) {
806 		error = 0;
807 		goto out;
808 	}
809 
810 	/*
811 	 * OK, we'll do the work.
812 	 */
813 	ssp->ss_flags |= SMBS_RECONNECTING;
814 
815 	/*
816 	 * Drop the lock while doing the TCON.
817 	 * On success, sets ss_tid, ss_vcgenid,
818 	 * and ss_flags |= SMBS_CONNECTED;
819 	 */
820 	SMB_SS_UNLOCK(ssp);
821 	error = smb_smb_treeconnect(ssp, scred);
822 	SMB_SS_LOCK(ssp);
823 
824 	ssp->ss_flags &= ~SMBS_RECONNECTING;
825 
826 	/* They can all go ahead! */
827 	if (ssp->ss_conn_waiters)
828 		cv_broadcast(&ssp->ss_conn_done);
829 
830 out:
831 	SMB_SS_UNLOCK(ssp);
832 
833 	return (error);
834 }
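
/*
 * Notes on the function above (editorial): cv_wait_sig() returns
 * zero when the sleep is interrupted by a signal, which is why a
 * zero "tmo" maps to EINTR; and SMBS_CONNECTED is re-checked after
 * the wait loop because another thread may have finished the tree
 * connect while we slept.
 */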
835 
836 /*
837  * Solaris zones support
838  */
839 /*ARGSUSED*/
840 void
841 lingering_vc(struct smb_vc *vc)
842 {
843 	/* good place for a breakpoint */
844 	DEBUG_ENTER("lingering VC");
845 }
846 
847 /*
848  * On zone shutdown, kill any IOD threads still running in this zone.
849  */
850 /* ARGSUSED */
851 void
852 nsmb_zone_shutdown(zoneid_t zoneid, void *data)
853 {
854 	struct smb_connobj *co;
855 	struct smb_vc *vcp;
856 
857 	SMB_CO_LOCK(&smb_vclist);
858 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
859 		vcp = CPTOVC(co);
860 
861 		if (vcp->vc_zoneid != zoneid)
862 			continue;
863 
864 		/*
865 		 * This will close the connection, and
866 		 * cause the IOD thread to terminate.
867 		 */
868 		smb_vc_kill(vcp);
869 	}
870 	SMB_CO_UNLOCK(&smb_vclist);
871 }
872 
873 /*
874  * On zone destroy, kill any IOD threads and free all resources they used.
875  */
876 /* ARGSUSED */
877 void
878 nsmb_zone_destroy(zoneid_t zoneid, void *data)
879 {
880 	struct smb_connobj *co;
881 	struct smb_vc *vcp;
882 
883 	/*
884 	 * We will repeat what should have already happened
885 	 * in zone_shutdown to make things go away.
886 	 *
887 	 * There should have been an smb_vc_rele call
888 	 * by now for all VCs in the zone.  If not,
889 	 * there's probably more we needed to do in
890 	 * the shutdown call.
891 	 */
892 
893 	SMB_CO_LOCK(&smb_vclist);
894 
895 	if (smb_vclist.co_usecount > 1) {
896 		SMBERROR("%d connections still active\n",
897 		    smb_vclist.co_usecount - 1);
898 	}
899 
900 	/* var, head, next_field */
901 	SLIST_FOREACH(co, &smb_vclist.co_children, co_next) {
902 		vcp = CPTOVC(co);
903 
904 		if (vcp->vc_zoneid != zoneid)
905 			continue;
906 
907 		/* Debugging */
908 		lingering_vc(vcp);
909 	}
910 
911 	SMB_CO_UNLOCK(&smb_vclist);
912 }
913