xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 42645588b93573e79aaead58bdaf7857c3736401)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 #ifdef DEBUG
41 /* See sys/queue.h */
42 #define	QUEUEDEBUG 1
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/atomic.h>
48 #include <sys/proc.h>
49 #include <sys/thread.h>
50 #include <sys/kmem.h>
51 #include <sys/unistd.h>
52 #include <sys/mount.h>
53 #include <sys/vnode.h>
54 #include <sys/types.h>
55 #include <sys/ddi.h>
56 #include <sys/sunddi.h>
57 #include <sys/stream.h>
58 #include <sys/strsun.h>
59 #include <sys/time.h>
60 #include <sys/class.h>
61 #include <sys/disp.h>
62 #include <sys/cmn_err.h>
63 #include <sys/zone.h>
64 #include <sys/sdt.h>
65 
66 #ifdef APPLE
67 #include <sys/smb_apple.h>
68 #else
69 #include <netsmb/smb_osdep.h>
70 #endif
71 
72 #include <netsmb/smb.h>
73 #include <netsmb/smb_conn.h>
74 #include <netsmb/smb_rq.h>
75 #include <netsmb/smb_subr.h>
76 #include <netsmb/smb_tran.h>
77 #include <netsmb/smb_trantcp.h>
78 
#ifdef NEED_SMBFS_CALLBACKS
/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;

/*
 * Install (or clear, when cb == NULL) the smbfs callback
 * vector used by smb_iod_dead / _notify_down / _notify_up.
 * Always succeeds; returns 0 for the caller's convenience.
 */
int
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
	return (0);
}
#endif /* NEED_SMBFS_CALLBACKS */
93 
94 static void smb_iod_sendall(struct smb_vc *);
95 static void smb_iod_recvall(struct smb_vc *);
96 static void smb_iod_main(struct smb_vc *);
97 
98 
99 #define	SMBIOD_SLEEP_TIMO	2
100 #define	SMBIOD_PING_TIMO	60	/* seconds */
101 
102 /*
103  * After this many seconds we want an unresponded-to request to trigger
104  * some sort of UE (dialogue).  If the connection hasn't responded at all
105  * in this many seconds then the dialogue is of the "connection isn't
106  * responding would you like to force unmount" variety.  If the connection
107  * has been responding (to other requests that is) then we need a dialogue
108  * of the "operation is still pending do you want to cancel it" variety.
109  * At present this latter dialogue does not exist so we have no UE and
110  * just keep waiting for the slow operation.
111  */
112 #define	SMBUETIMEOUT 8 /* seconds */
113 
114 
115 /* Lock Held version of the next function. */
116 static inline void
117 smb_iod_rqprocessed_LH(
118 	struct smb_rq *rqp,
119 	int error,
120 	int flags)
121 {
122 	rqp->sr_flags |= flags;
123 	rqp->sr_lerror = error;
124 	rqp->sr_rpgen++;
125 	rqp->sr_state = SMBRQ_NOTIFIED;
126 	cv_broadcast(&rqp->sr_cond);
127 }
128 
/*
 * Mark a request processed.  Same as the _LH variant
 * above, but acquires and drops the per-request lock.
 */
static void
smb_iod_rqprocessed(struct smb_rq *rqp, int error, int flags)
{
	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
140 
141 static void
142 smb_iod_invrq(struct smb_vc *vcp)
143 {
144 	struct smb_rq *rqp;
145 
146 	/*
147 	 * Invalidate all outstanding requests for this connection
148 	 */
149 	rw_enter(&vcp->iod_rqlock, RW_READER);
150 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
151 		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
152 	}
153 	rw_exit(&vcp->iod_rqlock);
154 }
155 
#ifdef SMBTP_UPCALL
/*
 * Transport upcall hook, registered via SMB_TRAN_SETPARAM
 * in smb_iod_connect.  Currently a no-op: the IOD thread
 * does blocking receives rather than relying on upcalls.
 */
static void
smb_iod_sockwakeup(struct smb_vc *vcp)
{
	/* note: called from socket upcall... */
}
#endif
163 
164 /*
165  * Called after we fail to send or recv.
166  * Called with no locks held.
167  */
static void
smb_iod_dead(struct smb_vc *vcp)
{

	/*
	 * Mark the VC dead and wake anyone blocked
	 * waiting for a connection state change.
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_state = SMBIOD_ST_DEAD;
	cv_broadcast(&vcp->vc_statechg);

#ifdef NEED_SMBFS_CALLBACKS
	if (fscb != NULL) {
		struct smb_connobj *co;
		/*
		 * Walk the share list, notify...
		 * Was: smbfs_dead(...share->ss_mount);
		 * XXX: Ok to hold vc_lock here?
		 * XXX: More to do here?
		 */
		SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
			/* smbfs_dead() */
			fscb->fscb_dead(CPTOSS(co));
		}
	}
#endif /* NEED_SMBFS_CALLBACKS */

	SMB_VC_UNLOCK(vcp);

	/* Fail all outstanding requests with ENOTCONN. */
	smb_iod_invrq(vcp);
}
196 
197 int
198 smb_iod_connect(struct smb_vc *vcp)
199 {
200 	struct proc *p = curproc;
201 	int error;
202 
203 	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
204 		return (EINVAL);
205 
206 	if (vcp->vc_laddr) {
207 		error = SMB_TRAN_BIND(vcp, vcp->vc_laddr, p);
208 		if (error)
209 			goto errout;
210 	}
211 
212 #ifdef SMBTP_SELECTID
213 	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, vcp);
214 #endif
215 #ifdef SMBTP_UPCALL
216 	SMB_TRAN_SETPARAM(vcp, SMBTP_UPCALL, (void *)smb_iod_sockwakeup);
217 #endif
218 
219 	error = SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p);
220 	if (error) {
221 		SMBIODEBUG("connection to %s error %d\n",
222 		    vcp->vc_srvname, error);
223 		goto errout;
224 	}
225 
226 	/* Success! */
227 	return (0);
228 
229 errout:
230 
231 	return (error);
232 }
233 
234 /*
235  * Called by smb_vc_rele, smb_vc_kill
236  * Make the connection go away, and
237  * the IOD (reader) thread too!
238  */
int
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
#if 0
	if (vcp->vc_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &vcp->vc_scred);
		vcp->vc_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
#endif

	/*
	 * Used to call smb_iod_closetran here,
	 * which did both disconnect and close.
	 * We now do the close in smb_vc_free,
	 * so we always have a valid vc_tdata.
	 * Now just send the disconnect here.
	 * Extra disconnect calls are ignored.
	 * (Return value deliberately ignored.)
	 */
	SMB_TRAN_DISCONNECT(vcp, curproc);

	/*
	 * If we have an IOD, let it handle the
	 * state change when it receives the ACK
	 * from the disconnect we just sent.
	 * Otherwise set the state here, i.e.
	 * after failing session setup.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		vcp->vc_state = SMBIOD_ST_DEAD;
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	/* Always succeeds. */
	return (0);
}
283 
284 /*
285  * Send one request.
286  *
287  * Called by _addrq (for internal requests)
288  * and by _sendall (via _addrq, _waitrq)
289  */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct proc *p = curproc;
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: requests with sr_flags & SMBR_INTERNAL
	 * need to pass here with these states:
	 *   SMBIOD_ST_TRANACTIVE: smb_negotiate
	 *   SMBIOD_ST_NEGOACTIVE: smb_ssnsetup
	 */
	SMBIODEBUG("vc_state = %d\n", vcp->vc_state);
	switch (vcp->vc_state) {
	case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_DEAD:
		/* This is what keeps the iod itself from sending more */
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_RECONNECT:
		/* Defer the send until reconnect completes. */
		return (0);
	default:
		break;
	}

	/* First send attempt: fill in the UID/TID header fields. */
	if (rqp->sr_sendcnt == 0) {

		*rqp->sr_rquid = htoles(vcp->vc_smbuid);

		/*
		 * XXX: Odd place for all this...
		 * Would expect these values in vc_smbuid
		 * and/or the request before we get here.
		 * I think most of this mess is due to having
		 * the initial UID set to SMB_UID_UNKNOWN when
		 * it should have been initialized to zero!
		 * REVISIT this later. XXX -gwr
		 *
		 * This is checking for the case where
		 * "vc_smbuid" was set to 0 in "smb_smb_ssnsetup()";
		 * that happens for requests that occur
		 * after that's done but before we get back the final
		 * session setup reply, where the latter is what
		 * gives us the UID.  (There can be an arbitrary # of
		 * session setup packet exchanges to complete
		 * "extended security" authentication.)
		 *
		 * However, if the server gave us a UID of 0 in a
		 * Session Setup andX reply, and we then do a
		 * Tree Connect andX and get back a TID, we should
		 * use that TID, not 0, in subsequent references to
		 * that tree (e.g., in NetShareEnum RAP requests).
		 *
		 * So, for now, we forcibly zero out the TID only if we're
		 * doing extended security, as that's the only time
		 * that "vc_smbuid" should be explicitly zeroed.
		 *
		 * note we must and do use SMB_TID_UNKNOWN for SMB_COM_ECHO
		 */
		if (!vcp->vc_smbuid &&
		    (vcp->vc_hflags2 & SMB_FLAGS2_EXT_SEC))
			*rqp->sr_rqtid = htoles(0);
		else
			*rqp->sr_rqtid =
			    htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 * Copy so the original stays around for possible resends.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	/* The transport consumes m on both success and failure. */
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
	m = 0; /* consumed by SEND */
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	/* Non-fatal send error: caller may retry later. */
	return (0);
}
422 
423 static int
424 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
425 {
426 	struct proc *p = curproc;
427 	mblk_t *m;
428 	uchar_t *hp;
429 	int error;
430 
431 top:
432 	m = NULL;
433 	error = SMB_TRAN_RECV(vcp, &m, p);
434 	if (error == EAGAIN)
435 		goto top;
436 	if (error)
437 		return (error);
438 	ASSERT(m);
439 
440 	m = m_pullup(m, SMB_HDRLEN);
441 	if (m == NULL) {
442 		return (ENOSR);
443 	}
444 
445 	/*
446 	 * Check the SMB header
447 	 */
448 	hp = mtod(m, uchar_t *);
449 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
450 		m_freem(m);
451 		return (EPROTO);
452 	}
453 
454 	*mpp = m;
455 	return (0);
456 }
457 
458 /*
459  * Process incoming packets
460  *
461  * This is the "reader" loop, run by the IOD thread
462  * while in state SMBIOD_ST_VCACTIVE.  The loop now
463  * simply blocks in the socket recv until either a
464  * message arrives, or a disconnect.
465  */
static void
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {

		/* Stop when the VC leaves the active state... */
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = EIO;
			break;
		}

		/* ...or when someone asks the IOD to shut down. */
		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			error = EIO;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if ((error == ETIME) && vcp->iod_rqwaiting) {
			/*
			 * Nothing received for 15 seconds,
			 * and we have requests waiting.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				smb_iod_notify_down(vcp);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter. It's tricky to
			 * do a send from the IOD thread because
			 * we don't want to block here.
			 *
			 * Using tmo=SMBNOREPLYWAIT in the request
			 * so smb_rq_reply will skip smb_iod_waitrq.
			 * The smb_smb_echo call uses SMBR_INTERNAL
			 * to avoid calling smb_iod_sendall().
			 */
			if ((etime_count & 3) == 2) {
				smb_smb_echo(vcp, &vcp->vc_scred,
				    SMBNOREPLYWAIT);
			}

			continue;
		} /* ETIME && iod_rqwaiting */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, disconnect, release, terminate.
			 * Usually can avoid the lock/unlock here.
			 * Note, in-line: _vc_kill ... _vc_gone
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1 &&
			    (vcp->vc_flags & SMBV_GONE) == 0) {
				vcp->vc_flags |= SMBV_GONE;
				SMB_VC_UNLOCK(vcp);
				smb_iod_disconnect(vcp);
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
			break;
		}

		/*
		 * Received something.  Yea!
		 * If we were in the "not responding" state,
		 * announce recovery and notify callbacks.
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			smb_iod_notify_up(vcp);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = SMB_HDRMID(hp);
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		/* Match the reply to a waiting request by MID. */
		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		/* No matching request: drop the reply (echoes expected). */
		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
/*			smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}
#ifdef APPLE
	/*
	 * check for interrupts
	 * On Solaris, handle in smb_iod_waitrq
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (smb_sigintr(rqp->sr_cred->scr_vfsctx))
			smb_iod_rqprocessed(rqp, EINTR, 0);
	}
	rw_exit(&vcp->iod_rqlock);
#endif
}
634 
635 /*
636  * Looks like we don't need these callbacks,
637  * but keep the code for now (for Apple).
638  */
/*ARGSUSED*/
void
smb_iod_notify_down(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *cop;

	if (fscb == NULL)
		return;

	/*
	 * Tell every share (child object) of this VC that
	 * the server has stopped responding.
	 * Was: smbfs_down(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(cop, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_down() */
		fscb->fscb_down(CPTOSS(cop));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
662 
/*ARGSUSED*/
void
smb_iod_notify_up(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *cop;

	if (fscb == NULL)
		return;

	/*
	 * Tell every share (child object) of this VC that
	 * the server is responding again.
	 * Was: smbfs_up(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(cop, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_up() */
		fscb->fscb_up(CPTOSS(cop));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
686 
687 /*
688  * The IOD thread is now just a "reader",
689  * so no more smb_iod_request().  Yea!
690  */
691 
692 /*
693  * Place request in the queue, and send it now if possible.
694  * Called with no locks held.
695  */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);

	ASSERT(rqp->sr_cred);

	/* This helps a little with debugging. */
	rqp->sr_owner = curthread;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/*
		 * This is some kind of internal request,
		 * i.e. negotiate, session setup, echo...
		 * Allow vc_state < SMBIOD_ST_VCACTIVE, and
		 * always send directly from this thread.
		 * May be called by the IOD thread (echo).
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (curthread == vcp->iod_thr) {
			if (sema_tryp(&vcp->vc_sendlock) == 0) {
				SMBIODEBUG("sendlock busy\n");
				error = EAGAIN;
			} else {
				/* Have vc_sendlock */
				error = smb_iod_sendrq(rqp);
				sema_v(&vcp->vc_sendlock);
			}
		} else {
			sema_p(&vcp->vc_sendlock);
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);
		/* On failure, take the request back off the list. */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	/*
	 * Normal request from the driver or smbfs.
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
780 
781 /*
782  * Mark an SMBR_MULTIPACKET request as
783  * needing another send.  Similar to the
784  * "normal" part of smb_iod_addrq.
785  */
786 int
787 smb_iod_multirq(struct smb_rq *rqp)
788 {
789 	struct smb_vc *vcp = rqp->sr_vc;
790 	int save_newrq;
791 
792 	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);
793 
794 	if (rqp->sr_flags & SMBR_INTERNAL)
795 		return (EINVAL);
796 
797 	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
798 		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
799 		return (ENOTCONN);
800 	}
801 
802 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
803 
804 	/* Already on iod_rqlist, just reset state. */
805 	rqp->sr_state = SMBRQ_NOTSENT;
806 
807 	/* iod_rqlock/WRITER protects iod_newrq */
808 	save_newrq = vcp->iod_newrq;
809 	vcp->iod_newrq++;
810 
811 	rw_exit(&vcp->iod_rqlock);
812 
813 	/*
814 	 * Now send any requests that need to be sent,
815 	 * including the one we just marked NOTSENT.
816 	 * Only the thread that found iod_newrq==0
817 	 * needs to run the send loop.
818 	 */
819 	if (save_newrq == 0)
820 		smb_iod_sendall(vcp);
821 
822 	return (0);
823 }
824 
825 
826 int
827 smb_iod_removerq(struct smb_rq *rqp)
828 {
829 	struct smb_vc *vcp = rqp->sr_vc;
830 
831 	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);
832 
833 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
834 #ifdef QUEUEDEBUG
835 	/*
836 	 * Make sure we have not already removed it.
837 	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
838 	 * XXX: Don't like the constant 1 here...
839 	 */
840 	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
841 #endif
842 	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
843 	rw_exit(&vcp->iod_rqlock);
844 
845 	return (0);
846 }
847 
848 
849 /*
850  * Internal version of smb_iod_waitrq.
851  *
852  * This is used when there is no reader thread,
853  * so we have to do the recv here.  The request
854  * must have the SMBR_INTERNAL flag set.
855  */
static int
smb_iod_waitrq_internal(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	uchar_t *hp;
	int error;
	uint16_t mid;
	uint8_t cmd;

	/* Make sure it's an internal request. */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0) {
		SMBIODEBUG("not internal\n");
		return (EINVAL);
	}

	/* Only simple requests allowed. */
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		SMBIODEBUG("multipacket\n");
		return (EINVAL);
	}

	/* Should not already have a response. */
	if (rqp->sr_rp.md_top) {
		DEBUG_ENTER("smb_iod_waitrq again?\n");
		return (0);
	}

	/*
	 * The message recv loop.  Terminates when we
	 * receive the message we're looking for.
	 * Drop others, with complaints.
	 * Scaled-down version of smb_iod_recvall
	 */
	for (;;) {
		m = NULL;
		error = smb_iod_recv1(vcp, &m);
		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
#if 0
			if (SMB_TRAN_FATAL(vcp, error)) {
				return (error);
			}
			continue;
#endif
			return (error);
		}

		hp = mtod(m, uchar_t *);
		cmd = SMB_HDRCMD(hp);
		/*LINTED*/
		mid = SMB_HDRMID(hp);

		SMBIODEBUG("cmd 0x%02x mid %04x\n",
		    (uint_t)cmd, (uint_t)mid);
		m_dumpm(m);

		/*
		 * Normally, the MID will match.
		 * For internal requests, also
		 * match on the cmd to be safe.
		 */
		if (mid == rqp->sr_mid)
			break;
		if (cmd == rqp->sr_cmd) {
			/*
			 * NOTE(review): this accepts a reply whose
			 * command matches even though the MID does
			 * not -- looks intentional for internal
			 * requests, but worth confirming.
			 */
			SMBIODEBUG("cmd match but not mid!\n");
			break;
		}

		/* Unrelated reply; discard and keep waiting. */
		SMBIODEBUG("drop nomatch\n");
		m_freem(m);
	}

	/*
	 * Have the response we were waiting for.
	 * Simplified version of the code from
	 * smb_iod_recvall
	 */
	SMBRQ_LOCK(rqp);
	if (rqp->sr_rp.md_top == NULL) {
		md_initm(&rqp->sr_rp, m);
	} else {
		SMBIODEBUG("drop duplicate\n");
		m_freem(m);
	}
	SMBRQ_UNLOCK(rqp);

	return (0);
}
948 
949 
950 /*
951  * Wait for a request to complete.
952  *
953  * For internal requests, see smb_iod_waitrq_internal.
954  * For normal requests, we need to deal with
955  * ioc_muxcnt dropping below vc_maxmux by
956  * making arrangements to send more...
957  */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	SMBIODEBUG("entry, cmd=0x%02x mid=0x%04x\n",
	    (uint_t)rqp->sr_cmd, (uint_t)rqp->sr_mid);

	/* Internal requests do their own recv; see above. */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		error = smb_iod_waitrq_internal(rqp);
		smb_iod_removerq(rqp);
		return (error);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will always timeout.
	 */
	ASSERT(curthread != vcp->iod_thr);

	atomic_inc_uint(&vcp->iod_rqwaiting);
	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			/* cv_wait_sig returned 0: signal received. */
			SMBIODEBUG("EINTR in sendwait, mid=%u\n", rqp->sr_mid);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = lbolt + SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = lbolt + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a successful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0 means the notice timeout expired. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0: final timeout expired; give up. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);
	atomic_dec_uint(&vcp->iod_rqwaiting);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
1115 
1116 /*
1117  * Shutdown all outstanding I/O requests on the specified share with
1118  * ENXIO; used when unmounting a share.  (There shouldn't be any for a
1119  * non-forced unmount; if this is a forced unmount, we have to shutdown
1120  * the requests as part of the unmount process.)
1121  */
1122 void
1123 smb_iod_shutdown_share(struct smb_share *ssp)
1124 {
1125 	struct smb_vc *vcp = SSTOVC(ssp);
1126 	struct smb_rq *rqp;
1127 
1128 	/*
1129 	 * Loop through the list of requests and shutdown the ones
1130 	 * that are for the specified share.
1131 	 */
1132 	rw_enter(&vcp->iod_rqlock, RW_READER);
1133 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
1134 		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
1135 			smb_iod_rqprocessed(rqp, EIO, 0);
1136 	}
1137 	rw_exit(&vcp->iod_rqlock);
1138 }
1139 
1140 /*
1141  * Send all requests that need sending.
1142  * Called from _addrq, _multirq, _waitrq
1143  */
static void
smb_iod_sendall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	int error, save_newrq, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/* Expect to find about this many requests. */
	SMBIODEBUG("top, save_newrq=%d\n", save_newrq);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state == SMBIOD_ST_DEAD) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);

	/* A dead connection kills everything still queued. */
	if (error == ENOTCONN)
		smb_iod_dead(vcp);

}
1217 
1218 
/*
 * "main" function for smbiod daemon thread
 *
 * Runs the receive loop for one VC until the connection suffers
 * a fatal error, then marks the VC dead, clears iod_thr, wakes
 * any waiters (see smb_iod_destroy), drops the hold taken by
 * smb_iod_create, and exits the thread.  Never returns.
 */
void
smb_iod_main(struct smb_vc *vcp)
{
	kthread_t *thr = curthread;

	SMBIODEBUG("entry\n");

	SMBIODEBUG("Running, thr=0x%p\n", thr);

	/*
	 * Prevent race with thread that created us.
	 * After we get this lock iod_thr is set.
	 */
	SMB_VC_LOCK(vcp);
	ASSERT(thr == vcp->iod_thr);

	/* Redundant with iod_thr, but may help debugging. */
	vcp->iod_flags |= SMBIOD_RUNNING;
	SMB_VC_UNLOCK(vcp);

	/*
	 * OK, this is a new reader thread.
	 * In case of reconnect, tell any
	 * old requests they can restart.
	 */
	smb_iod_invrq(vcp);

	/*
	 * Run the "reader" loop.
	 */
	smb_iod_recvall(vcp);

	/*
	 * The reader loop function returns only when
	 * there's been a fatal error on the connection.
	 */
	smb_iod_dead(vcp);

	/*
	 * The reader thread is going away.  Clear iod_thr,
	 * and wake up anybody waiting for us to quit.
	 */
	SMB_VC_LOCK(vcp);
	vcp->iod_flags &= ~SMBIOD_RUNNING;
	vcp->iod_thr = NULL;
	cv_broadcast(&vcp->iod_exit);
	SMB_VC_UNLOCK(vcp);

	/*
	 * This hold was taken in smb_iod_create()
	 * when this thread was created.
	 */
	smb_vc_rele(vcp);

	SMBIODEBUG("Exiting, p=0x%p\n", curproc);
	zthread_exit();
}
1279 
1280 /*
1281  * Create the reader thread.
1282  *
1283  * This happens when we are just about to
1284  * enter vc_state = SMBIOD_ST_VCACTIVE;
1285  * See smb_sm_ssnsetup()
1286  */
1287 int
1288 smb_iod_create(struct smb_vc *vcp)
1289 {
1290 	kthread_t *thr = NULL;
1291 	int error;
1292 
1293 	/*
1294 	 * Take a hold on the VC for the IOD thread.
1295 	 * This hold will be released when the IOD
1296 	 * thread terminates. (or on error below)
1297 	 */
1298 	smb_vc_hold(vcp);
1299 
1300 	SMB_VC_LOCK(vcp);
1301 
1302 	if (vcp->iod_thr != NULL) {
1303 		SMBIODEBUG("aready have an IOD?");
1304 		error = EIO;
1305 		goto out;
1306 	}
1307 
1308 	/*
1309 	 * Darwin code used: IOCreateThread(...)
1310 	 * In Solaris, we use...
1311 	 */
1312 	thr = zthread_create(
1313 	    NULL,	/* stack */
1314 	    0, /* stack size (default) */
1315 	    smb_iod_main, /* entry func... */
1316 	    vcp, /* ... and arg */
1317 	    0, /* len (of what?) */
1318 	    minclsyspri); /* priority */
1319 	if (thr == NULL) {
1320 		SMBERROR("can't start smbiod\n");
1321 		error = ENOMEM;
1322 		goto out;
1323 	}
1324 
1325 	/* Success! */
1326 	error = 0;
1327 	vcp->iod_thr = thr;
1328 
1329 out:
1330 	SMB_VC_UNLOCK(vcp);
1331 
1332 	if (error)
1333 		smb_vc_rele(vcp);
1334 
1335 	return (error);
1336 }
1337 
/*
 * Called from smb_vc_free to do any
 * cleanup of our IOD (reader) thread.
 *
 * Always returns 0.
 */
int
smb_iod_destroy(struct smb_vc *vcp)
{
	clock_t tmo;

	/*
	 * Let's try to make sure the IOD thread
	 * goes away, by waiting for it to exit.
	 * Normally, it's gone by now.
	 *
	 * Only wait for a second, because we're in the
	 * teardown path and don't want to get stuck here.
	 * Should not take long, or things are hosed...
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->iod_thr) {
		vcp->iod_flags |= SMBIOD_SHUTDOWN;
		/*
		 * NOTE(review): "lbolt" is the legacy global tick
		 * counter; newer code uses ddi_get_lbolt() -- confirm
		 * against the kernel version this builds for.
		 */
		tmo = lbolt + hz;
		/* iod_exit is broadcast by smb_iod_main on its way out. */
		tmo = cv_timedwait(&vcp->iod_exit, &vcp->vc_lock, tmo);
		/* cv_timedwait returns -1 when the timeout expired. */
		if (tmo == -1) {
			SMBERROR("IOD thread for %s did not exit?\n",
			    vcp->vc_srvname);
		}
	}
	if (vcp->iod_thr) {
		/* This should not happen. */
		SMBIODEBUG("IOD thread did not exit!\n");
		/* Try harder? */
		tsignal(vcp->iod_thr, SIGKILL);
	}
	SMB_VC_UNLOCK(vcp);

	return (0);
}
1376