xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 40c0e2317898b8c774791bdc2b30bd50111ab1fa)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  *
39  * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
40  */
41 
42 #ifdef DEBUG
43 /* See sys/queue.h */
44 #define	QUEUEDEBUG 1
45 #endif
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/atomic.h>
50 #include <sys/proc.h>
51 #include <sys/thread.h>
52 #include <sys/file.h>
53 #include <sys/kmem.h>
54 #include <sys/unistd.h>
55 #include <sys/mount.h>
56 #include <sys/vnode.h>
57 #include <sys/types.h>
58 #include <sys/ddi.h>
59 #include <sys/sunddi.h>
60 #include <sys/stream.h>
61 #include <sys/strsun.h>
62 #include <sys/time.h>
63 #include <sys/class.h>
64 #include <sys/disp.h>
65 #include <sys/cmn_err.h>
66 #include <sys/zone.h>
67 #include <sys/sdt.h>
68 
69 #include <netsmb/smb_osdep.h>
70 
71 #include <netsmb/smb.h>
72 #include <netsmb/smb_conn.h>
73 #include <netsmb/smb_rq.h>
74 #include <netsmb/smb_subr.h>
75 #include <netsmb/smb_tran.h>
76 #include <netsmb/smb_trantcp.h>
77 
/*
 * SMB messages are up to 64K.
 * Let's leave room for two.
 *
 * These file-scope tunables are handed to the transport in
 * nsmb_iod_connect() via SMB_TRAN_SETPARAM (SNDBUF, RCVBUF,
 * and TCP connect timeout).
 */
static int smb_tcpsndbuf = 0x20000;
static int smb_tcprcvbuf = 0x20000;
static int smb_connect_timeout = 10; /* seconds */

/* Forward declaration; defined later in this file. */
int smb_iod_send_echo(smb_vc_t *);

#ifdef	_FAKE_KERNEL
/* User-mode ("fake kernel") test build supplies tsignal. */
extern void tsignal(kthread_t *, int);
#endif
91 
/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;

/*
 * Register (cb != NULL) or clear (cb == NULL) the smbfs
 * callback vector used below to notify the file system of
 * connection state changes (up/down/disconnect/connect).
 */
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
103 
104 static void
105 smb_iod_share_disconnected(smb_share_t *ssp)
106 {
107 
108 	smb_share_invalidate(ssp);
109 
110 	/* smbfs_dead() */
111 	if (fscb && fscb->fscb_disconn) {
112 		fscb->fscb_disconn(ssp);
113 	}
114 }
115 
/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	/*
	 * Callers typically hold SMB_VC_LOCK and cv_broadcast
	 * vc_statechg after the change (see uses below).
	 */
	vcp->vc_state = state;
}
125 
126 /* Lock Held version of the next function. */
127 static inline void
128 smb_iod_rqprocessed_LH(
129 	struct smb_rq *rqp,
130 	int error,
131 	int flags)
132 {
133 	rqp->sr_flags |= flags;
134 	rqp->sr_lerror = error;
135 	rqp->sr_rpgen++;
136 	rqp->sr_state = SMBRQ_NOTIFIED;
137 	cv_broadcast(&rqp->sr_cond);
138 }
139 
/*
 * Locked wrapper around smb_iod_rqprocessed_LH().
 */
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error, int flags)
{
	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
151 
152 static void
153 smb_iod_invrq(struct smb_vc *vcp)
154 {
155 	struct smb_rq *rqp;
156 
157 	/*
158 	 * Invalidate all outstanding requests for this connection
159 	 */
160 	rw_enter(&vcp->iod_rqlock, RW_READER);
161 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
162 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
163 	}
164 	rw_exit(&vcp->iod_rqlock);
165 }
166 
167 /*
168  * Called by smb_vc_rele, smb_vc_kill, and by the driver
169  * close entry point if the IOD closes its dev handle.
170  *
171  * Forcibly kill the connection and IOD.
172  */
173 void
174 smb_iod_disconnect(struct smb_vc *vcp)
175 {
176 
177 	/*
178 	 * Inform everyone of the state change.
179 	 */
180 	SMB_VC_LOCK(vcp);
181 	if (vcp->vc_state != SMBIOD_ST_DEAD) {
182 		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
183 		cv_broadcast(&vcp->vc_statechg);
184 	}
185 	SMB_VC_UNLOCK(vcp);
186 
187 	/*
188 	 * Let's be safe here and avoid doing any
189 	 * call across the network while trying to
190 	 * shut things down.  If we just disconnect,
191 	 * the server will take care of the logoff.
192 	 */
193 	SMB_TRAN_DISCONNECT(vcp);
194 }
195 
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Returns ENOTCONN when the send cannot be retried (wrong VC
 * state, too many send attempts, fatal transport error).
 * Non-fatal transport errors are recorded in sr_lerror and
 * zero is returned so the request stays queued for retry.
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
		    SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 * Note we have the signing flag during
			 * session setup but no key yet, and
			 * don't want sequence numbers there.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * Send a copy so the original message remains available
	 * if the request must be re-sent later.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		/* Wake smb_iod_waitrq if it's waiting for the send. */
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

	return (0);
}
309 
310 static int
311 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
312 {
313 	mblk_t *m;
314 	uchar_t *hp;
315 	int error;
316 
317 top:
318 	m = NULL;
319 	error = SMB_TRAN_RECV(vcp, &m);
320 	if (error == EAGAIN)
321 		goto top;
322 	if (error)
323 		return (error);
324 	ASSERT(m);
325 
326 	m = m_pullup(m, SMB_HDRLEN);
327 	if (m == NULL) {
328 		return (ENOSR);
329 	}
330 
331 	/*
332 	 * Check the SMB header
333 	 */
334 	hp = mtod(m, uchar_t *);
335 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
336 		m_freem(m);
337 		return (EPROTO);
338 	}
339 
340 	*mpp = m;
341 	return (0);
342 }
343 
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread.  Normally we're in
 * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
 * other states with poll==TRUE
 *
 * A non-zero error return here causes the IOD work loop to terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error = 0;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		/*
		 * Internal requests (reconnecting) call this in a loop
		 * (with poll==TRUE) until the request completes.
		 */
		if (error == ETIME && poll)
			break;

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {
			/*
			 * Nothing received for 15 seconds and
			 * we have requests in the queue.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				/* Was: smb_iod_notify_down(vcp); */
				if (fscb && fscb->fscb_down)
					smb_vc_walkshares(vcp,
					    fscb->fscb_down);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter.
			 * (etime_count == 2, 6, 10, ...)
			 */
			if ((etime_count & 3) == 2) {
				(void) smb_iod_send_echo(vcp);
			}

			continue;
		} /* ETIME && requests in queue */

		if (error == ETIME) {	/* and req list empty */
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, let it become IDLE, and then
			 * let it be destroyed if not used.
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			/* Recheck the use count under the VC lock. */
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
				SMB_VC_UNLOCK(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv. above returned some error
			 * we can't continue from i.e. ENOTCONN.
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 *
			 * If this VC has shares, try reconnect;
			 * otherwise let this VC die now.
			 */
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount > 1)
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
			else
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			break;
		}

		/*
		 * Received something.  Yea!
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			/* Was: smb_iod_notify_up(vcp); */
			if (fscb && fscb->fscb_up)
				smb_vc_walkshares(vcp, fscb->fscb_up);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = letohs(SMB_HDRMID(hp));
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		/* Match the response MID to a queued request. */
		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				/* First response for this request. */
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					/*
					 * NOTE(review): m is not freed on
					 * this path — looks like a possible
					 * mblk leak; confirm.
					 */
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		if (rqp == NULL) {
			/* No matching request; drop the message. */
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

		/*
		 * Reconnect calls this in a loop with poll=TRUE
		 * We've received a response, so break now.
		 */
		if (poll) {
			error = 0;
			break;
		}
	}

	return (error);
}
536 
537 /*
538  * The IOD receiver thread has requests pending and
539  * has not received anything in a while.  Try to
540  * send an SMB echo request.  It's tricky to do a
541  * send from the IOD thread because we can't block.
542  *
543  * Using tmo=SMBNOREPLYWAIT in the request
544  * so smb_rq_reply will skip smb_iod_waitrq.
545  * The smb_smb_echo call uses SMBR_INTERNAL
546  * to avoid calling smb_iod_sendall().
547  */
548 int
549 smb_iod_send_echo(smb_vc_t *vcp)
550 {
551 	smb_cred_t scred;
552 	int err;
553 
554 	smb_credinit(&scred, NULL);
555 	err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
556 	smb_credrele(&scred);
557 	return (err);
558 }
559 
560 /*
561  * The IOD thread is now just a "reader",
562  * so no more smb_iod_request().  Yea!
563  */
564 
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Returns zero once the request is queued (and possibly sent).
 * For _INTERNAL requests, a send failure is returned directly
 * and the request is removed from the queue again.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify its state here without locks.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	} else {
		/*
		 * State should be correct after the check in
		 * smb_rq_enqueue(), but we dropped locks...
		 */
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			return (ENOTCONN);
		}
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
653 
654 /*
655  * Mark an SMBR_MULTIPACKET request as
656  * needing another send.  Similar to the
657  * "normal" part of smb_iod_addrq.
658  */
659 int
660 smb_iod_multirq(struct smb_rq *rqp)
661 {
662 	struct smb_vc *vcp = rqp->sr_vc;
663 	int save_newrq;
664 
665 	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
666 
667 	if (rqp->sr_flags & SMBR_INTERNAL)
668 		return (EINVAL);
669 
670 	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
671 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
672 		return (ENOTCONN);
673 	}
674 
675 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
676 
677 	/* Already on iod_rqlist, just reset state. */
678 	rqp->sr_state = SMBRQ_NOTSENT;
679 
680 	/* iod_rqlock/WRITER protects iod_newrq */
681 	save_newrq = vcp->iod_newrq;
682 	vcp->iod_newrq++;
683 
684 	rw_exit(&vcp->iod_rqlock);
685 
686 	/*
687 	 * Now send any requests that need to be sent,
688 	 * including the one we just marked NOTSENT.
689 	 * Only the thread that found iod_newrq==0
690 	 * needs to run the send loop.
691 	 */
692 	if (save_newrq == 0)
693 		smb_iod_sendall(vcp);
694 
695 	return (0);
696 }
697 
698 
/*
 * Remove a request from the VC's request list.
 * Called with no locks held.
 */
void
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);
}
716 
717 
718 
/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns zero on success (reply data left in rqp), EINTR
 * if interrupted by a signal, or ETIME on request timeout.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/*
		 * Internal requests run on the IOD thread itself,
		 * which can't block in the normal wait below (it
		 * IS the reader), so pump the send/receive loops
		 * directly until notified or sr_timo is used up.
		 */
		int timeleft = rqp->sr_timo;

		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
	again:
		smb_iod_sendall(vcp);
		error = smb_iod_recvall(vcp, B_TRUE);
		if (error == ETIME) {
			/* We waited SMB_NBTIMO sec. */
			timeleft -= SMB_NBTIMO;
			if (timeleft > 0)
				goto again;
		}

		smb_iod_removerq(rqp);
		if (rqp->sr_state != SMBRQ_NOTIFIED)
			error = ETIME;

		return (error);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			/* cv_wait_sig returned zero: signal taken. */
			SMBIODEBUG("EINTR in sendwait, rq=%p\n", (void *)rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* Notice timeout expired; observable via dtrace. */
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* Full request timeout expired. */
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
874 
/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		/* Skip requests that have already completed. */
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}
898 
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			/* Stop walking on the first send failure. */
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}
963 
/*
 * Ioctl functions called by the user-level I/O Daemon (IOD)
 * to bring up and service a connection to some SMB server.
 */
968 
969 int
970 nsmb_iod_connect(struct smb_vc *vcp)
971 {
972 	int err, val;
973 
974 	ASSERT(vcp->iod_thr == curthread);
975 
976 	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
977 		cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
978 		return (EINVAL);
979 	}
980 
981 	/*
982 	 * Set various options on this endpoint.
983 	 * Keep going in spite of errors.
984 	 */
985 	val = smb_tcpsndbuf;
986 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
987 	if (err != 0) {
988 		cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
989 	}
990 	val = smb_tcprcvbuf;
991 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
992 	if (err != 0) {
993 		cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
994 	}
995 	val = 1;
996 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
997 	if (err != 0) {
998 		cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
999 	}
1000 	val = 1;
1001 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
1002 	if (err != 0) {
1003 		cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY, err=%d", err);
1004 	}
1005 	val = smb_connect_timeout * 1000;
1006 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
1007 	if (err != 0) {
1008 		cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo, err=%d", err);
1009 	}
1010 
1011 	/*
1012 	 * Bind and connect
1013 	 */
1014 	err = SMB_TRAN_BIND(vcp, NULL);
1015 	if (err != 0) {
1016 		cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
1017 		/* Continue on and try connect. */
1018 	}
1019 	err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
1020 	/*
1021 	 * No cmn_err here, as connect failures are normal, i.e.
1022 	 * when a server has multiple addresses and only some are
1023 	 * routed for us. (libsmbfs tries them all)
1024 	 */
1025 	if (err == 0) {
1026 		SMB_VC_LOCK(vcp);
1027 		smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
1028 		SMB_VC_UNLOCK(vcp);
1029 	} /* else stay in state reconnect */
1030 
1031 	return (err);
1032 }
1033 
1034 /*
1035  * Do the whole SMB1/SMB2 negotiate
1036  */
1037 int
1038 nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
1039 {
1040 	struct smb_sopt *sv = &vcp->vc_sopt;
1041 	smb_cred_t scred;
1042 	int err = 0;
1043 
1044 	ASSERT(vcp->iod_thr == curthread);
1045 
1046 	if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
1047 		cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
1048 		return (EINVAL);
1049 	}
1050 
1051 	/*
1052 	 * (Re)init negotiated values
1053 	 */
1054 	bzero(sv, sizeof (*sv));
1055 	vcp->vc_next_seq = 0;
1056 
1057 	/*
1058 	 * If this was reconnect, get rid of the old MAC key
1059 	 * and session key.
1060 	 */
1061 	SMB_VC_LOCK(vcp);
1062 	if (vcp->vc_mackey != NULL) {
1063 		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
1064 		vcp->vc_mackey = NULL;
1065 		vcp->vc_mackeylen = 0;
1066 	}
1067 	if (vcp->vc_ssnkey != NULL) {
1068 		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
1069 		vcp->vc_ssnkey = NULL;
1070 		vcp->vc_ssnkeylen = 0;
1071 	}
1072 	SMB_VC_UNLOCK(vcp);
1073 
1074 	smb_credinit(&scred, cr);
1075 	err = smb_smb_negotiate(vcp, &scred);
1076 	smb_credrele(&scred);
1077 
1078 	if (err == 0) {
1079 		SMB_VC_LOCK(vcp);
1080 		smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
1081 		SMB_VC_UNLOCK(vcp);
1082 	}
1083 	/*
1084 	 * (else) leave state as it was.
1085 	 * User-level will report this error
1086 	 * and close this device handle.
1087 	 */
1088 
1089 	return (err);
1090 }
1091 
1092 /*
1093  * Do either SMB1 or SMB2 session setup.
1094  */
1095 int
1096 nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
1097 {
1098 	smb_cred_t scred;
1099 	int err;
1100 
1101 	ASSERT(vcp->iod_thr == curthread);
1102 
1103 	switch (vcp->vc_state) {
1104 	case SMBIOD_ST_NEGOTIATED:
1105 	case SMBIOD_ST_AUTHCONT:
1106 		break;
1107 	default:
1108 		return (EINVAL);
1109 	}
1110 
1111 	smb_credinit(&scred, cr);
1112 	// XXX if SMB1 else ...
1113 	err = smb_smb_ssnsetup(vcp, &scred);
1114 	smb_credrele(&scred);
1115 
1116 	SMB_VC_LOCK(vcp);
1117 	switch (err) {
1118 	case 0:
1119 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
1120 		break;
1121 	case EINPROGRESS:	/* MORE_PROCESSING_REQUIRED */
1122 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
1123 		break;
1124 	default:
1125 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
1126 		break;
1127 	}
1128 	SMB_VC_UNLOCK(vcp);
1129 
1130 	return (err);
1131 }
1132 
/*
 * Main "work" entry for the user-level IOD (via ioctl):
 * copy in the session key, initialize SMB signing, mark
 * the VC active, then run the reader loop until the
 * connection dies or the VC has no more references.
 */
/* ARGSUSED */
int
smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
{
	smbioc_ssn_work_t *wk = &vcp->vc_work;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Should be in state...
	 */
	if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
		cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
		return (EINVAL);
	}

	/*
	 * Update the session key and initialize SMB signing.
	 *
	 * This implementation does not use multiple SMB sessions per
	 * TCP connection (where only the first session key is used)
	 * so we always have a new session key here.  Sanity check the
	 * length from user space.  Normally 16 or 32.
	 */
	if (wk->wk_u_ssnkey_len > 1024) {
		cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
		return (EINVAL);
	}

	ASSERT(vcp->vc_ssnkey == NULL);
	SMB_VC_LOCK(vcp);
	if (wk->wk_u_ssnkey_len != 0 &&
	    wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
		vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
		vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
		/* "flags" is the ioctl data model; see ddi_copyin(9F). */
		if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
		    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
			err = EFAULT;
		}
	}
	SMB_VC_UNLOCK(vcp);
	if (err)
		return (err);

	/*
	 * If we have a session key, derive the MAC key for SMB signing.
	 * If this was a NULL session, we might have no session key.
	 */
	ASSERT(vcp->vc_mackey == NULL);
	if (vcp->vc_ssnkey != NULL) {
		err = smb_sign_init(vcp);
		if (err != 0)
			return (err);
	}

	/*
	 * Tell any enqueued requests they can start.
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.
	 */
	err = smb_iod_recvall(vcp, B_FALSE);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

	return (err);
}
1238 
1239 /*
1240  * Wait around for someone to ask to use this VC.
1241  * If the VC has only the IOD reference, then
1242  * wait only a minute or so, then drop it.
1243  */
1244 int
1245 smb_iod_vc_idle(struct smb_vc *vcp)
1246 {
1247 	clock_t tr, delta = SEC_TO_TICK(15);
1248 	int err = 0;
1249 
1250 	/*
1251 	 * This is called by the one-and-only
1252 	 * IOD thread for this VC.
1253 	 */
1254 	ASSERT(vcp->iod_thr == curthread);
1255 
1256 	SMB_VC_LOCK(vcp);
1257 	while (vcp->vc_state == SMBIOD_ST_IDLE) {
1258 		tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1259 		    delta, TR_CLOCK_TICK);
1260 		if (tr == 0) {
1261 			err = EINTR;
1262 			break;
1263 		}
1264 		if (tr < 0) {
1265 			/* timeout */
1266 			if (vcp->vc_co.co_usecount == 1) {
1267 				/* Let this IOD terminate. */
1268 				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
1269 				/* nobody to cv_broadcast */
1270 				break;
1271 			}
1272 		}
1273 	}
1274 	SMB_VC_UNLOCK(vcp);
1275 
1276 	return (err);
1277 }
1278 
1279 /*
1280  * After a failed reconnect attempt, smbiod will
1281  * call this to make current requests error out.
1282  */
1283 int
1284 smb_iod_vc_rcfail(struct smb_vc *vcp)
1285 {
1286 	clock_t tr;
1287 	int err = 0;
1288 
1289 	/*
1290 	 * This is called by the one-and-only
1291 	 * IOD thread for this VC.
1292 	 */
1293 	ASSERT(vcp->iod_thr == curthread);
1294 	SMB_VC_LOCK(vcp);
1295 
1296 	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1297 	cv_broadcast(&vcp->vc_statechg);
1298 
1299 	/*
1300 	 * Short wait here for two reasons:
1301 	 * (1) Give requests a chance to error out.
1302 	 * (2) Prevent immediate retry.
1303 	 */
1304 	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1305 	    SEC_TO_TICK(5), TR_CLOCK_TICK);
1306 	if (tr == 0)
1307 		err = EINTR;
1308 
1309 	/*
1310 	 * While we were waiting on the CV, the state might have
1311 	 * changed to reconnect.  If so, leave that; otherwise
1312 	 * go to state idle until the next request.
1313 	 */
1314 	if (vcp->vc_state == SMBIOD_ST_RCFAILED)
1315 		smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1316 	cv_broadcast(&vcp->vc_statechg);
1317 
1318 	SMB_VC_UNLOCK(vcp);
1319 
1320 	return (err);
1321 }
1322 
1323 /*
1324  * Ask the IOD to reconnect (if not already underway)
1325  * then wait for the reconnect to finish.
1326  */
1327 int
1328 smb_iod_reconnect(struct smb_vc *vcp)
1329 {
1330 	int err = 0, rv;
1331 
1332 	SMB_VC_LOCK(vcp);
1333 again:
1334 	switch (vcp->vc_state) {
1335 
1336 	case SMBIOD_ST_IDLE:
1337 		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1338 		cv_signal(&vcp->iod_idle);
1339 		/* FALLTHROUGH */
1340 
1341 	case SMBIOD_ST_RECONNECT:
1342 	case SMBIOD_ST_CONNECTED:
1343 	case SMBIOD_ST_NEGOTIATED:
1344 	case SMBIOD_ST_AUTHCONT:
1345 	case SMBIOD_ST_AUTHOK:
1346 		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1347 		if (rv == 0) {
1348 			err = EINTR;
1349 			break;
1350 		}
1351 		goto again;
1352 
1353 	case SMBIOD_ST_VCACTIVE:
1354 		err = 0; /* success! */
1355 		break;
1356 
1357 	case SMBIOD_ST_AUTHFAIL:
1358 	case SMBIOD_ST_RCFAILED:
1359 	case SMBIOD_ST_DEAD:
1360 	default:
1361 		err = ENOTCONN;
1362 		break;
1363 	}
1364 
1365 	SMB_VC_UNLOCK(vcp);
1366 	return (err);
1367 }
1368