xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_iod.c (revision 4bff34e37def8a90f9194d81bc345c52ba20086a)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
33  */
34 
35 #pragma ident	"%Z%%M%	%I%	%E% SMI"
36 
37 #ifdef DEBUG
38 /* See sys/queue.h */
39 #define	QUEUEDEBUG 1
40 #endif
41 
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/atomic.h>
45 #include <sys/proc.h>
46 #include <sys/thread.h>
47 #include <sys/kmem.h>
48 #include <sys/unistd.h>
49 #include <sys/mount.h>
50 #include <sys/vnode.h>
51 #include <sys/types.h>
52 #include <sys/ddi.h>
53 #include <sys/sunddi.h>
54 #include <sys/stream.h>
55 #include <sys/strsun.h>
56 #include <sys/time.h>
57 #include <sys/class.h>
58 #include <sys/disp.h>
59 #include <sys/cmn_err.h>
60 #include <sys/zone.h>
61 #include <sys/sdt.h>
62 
63 #ifdef APPLE
64 #include <sys/smb_apple.h>
65 #else
66 #include <netsmb/smb_osdep.h>
67 #endif
68 
69 #include <netsmb/smb.h>
70 #include <netsmb/smb_conn.h>
71 #include <netsmb/smb_rq.h>
72 #include <netsmb/smb_subr.h>
73 #include <netsmb/smb_tran.h>
74 #include <netsmb/smb_trantcp.h>
75 
76 #ifdef NEED_SMBFS_CALLBACKS
77 /*
78  * This is set/cleared when smbfs loads/unloads
79  * No locks should be necessary, because smbfs
80  * can't unload until all the mounts are gone.
81  */
82 static smb_fscb_t *fscb;
int
smb_fscb_set(smb_fscb_t *cb)
{
	/* Register (or clear, when cb == NULL) the smbfs callback vector. */
	fscb = cb;
	return (0);
}
89 #endif /* NEED_SMBFS_CALLBACKS */
90 
91 static void smb_iod_sendall(struct smb_vc *);
92 static void smb_iod_recvall(struct smb_vc *);
93 static void smb_iod_main(struct smb_vc *);
94 
95 
96 #define	SMBIOD_SLEEP_TIMO	2
97 #define	SMBIOD_PING_TIMO	60	/* seconds */
98 
99 /*
100  * After this many seconds we want an unresponded-to request to trigger
101  * some sort of UE (dialogue).  If the connection hasn't responded at all
102  * in this many seconds then the dialogue is of the "connection isn't
103  * responding would you like to force unmount" variety.  If the connection
104  * has been responding (to other requests that is) then we need a dialogue
105  * of the "operation is still pending do you want to cancel it" variety.
106  * At present this latter dialogue does not exist so we have no UE and
107  * just keep waiting for the slow operation.
108  */
109 #define	SMBUETIMEOUT 8 /* seconds */
110 
111 
/*
 * Lock-held version of smb_iod_rqprocessed (below).
 * Caller must hold SMBRQ_LOCK(rqp).
 *
 * Records the completion error and flags, advances the reply
 * generation (sr_rpgen) so waiters in smb_iod_waitrq notice the
 * change, marks the request notified, and wakes all waiters.
 */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}
125 
/*
 * Mark a request as processed: take the request lock and
 * do the work in smb_iod_rqprocessed_LH above.
 */
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}
137 
/*
 * Complete every outstanding request on this VC with ENOTCONN
 * and the SMBR_RESTART flag set, waking all waiters.
 * Called from smb_iod_dead after the connection fails.
 */
static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
}
152 
153 #ifdef SMBTP_UPCALL
/* Empty socket-upcall stub; compiled only when SMBTP_UPCALL exists. */
static void
smb_iod_sockwakeup(struct smb_vc *vcp)
{
	/* note: called from socket upcall... */
}
159 #endif
160 
/*
 * Called after we fail to send or recv.
 * Called with no locks held.
 *
 * Marks the VC dead, notifies smbfs of the dead shares (when the
 * callback vector is compiled in), then invalidates all outstanding
 * requests so their waiters return ENOTCONN.
 */
static void
smb_iod_dead(struct smb_vc *vcp)
{

	SMB_VC_LOCK(vcp);
	vcp->vc_state = SMBIOD_ST_DEAD;
	cv_broadcast(&vcp->vc_statechg);

#ifdef NEED_SMBFS_CALLBACKS
	if (fscb != NULL) {
		struct smb_connobj *co;
		/*
		 * Walk the share list, notify...
		 * Was: smbfs_dead(...share->ss_mount);
		 * XXX: Ok to hold vc_lock here?
		 * XXX: More to do here?
		 */
		SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
			/* smbfs_dead() */
			fscb->fscb_dead(CPTOSS(co));
		}
	}
#endif /* NEED_SMBFS_CALLBACKS */

	SMB_VC_UNLOCK(vcp);

	smb_iod_invrq(vcp);
}
193 
194 int
195 smb_iod_connect(struct smb_vc *vcp)
196 {
197 	struct proc *p = curproc;
198 	int error;
199 
200 	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
201 		return (EINVAL);
202 
203 	if (vcp->vc_laddr) {
204 		error = SMB_TRAN_BIND(vcp, vcp->vc_laddr, p);
205 		if (error)
206 			goto errout;
207 	}
208 
209 #ifdef SMBTP_SELECTID
210 	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, vcp);
211 #endif
212 #ifdef SMBTP_UPCALL
213 	SMB_TRAN_SETPARAM(vcp, SMBTP_UPCALL, (void *)smb_iod_sockwakeup);
214 #endif
215 
216 	error = SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p);
217 	if (error) {
218 		SMBIODEBUG("connection to %s error %d\n",
219 		    vcp->vc_srvname, error);
220 		goto errout;
221 	}
222 
223 	/* Success! */
224 	return (0);
225 
226 errout:
227 
228 	return (error);
229 }
230 
/*
 * Called by smb_vc_rele, smb_vc_kill
 * Make the connection go away, and
 * the IOD (reader) thread too!
 *
 * Always returns 0.  The transport disconnect is sent without
 * any prior network logoff (see comment below).
 */
int
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
#if 0
	if (vcp->vc_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &vcp->vc_scred);
		vcp->vc_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
#endif

	/*
	 * Used to call smb_iod_closetran here,
	 * which did both disconnect and close.
	 * We now do the close in smb_vc_free,
	 * so we always have a valid vc_tdata.
	 * Now just send the disconnect here.
	 * Extra disconnect calls are ignored.
	 */
	SMB_TRAN_DISCONNECT(vcp, curproc);

	/*
	 * If we have an IOD, let it handle the
	 * state change when it receives the ACK
	 * from the disconnect we just sent.
	 * Otherwise set the state here, i.e.
	 * after failing session setup.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		vcp->vc_state = SMBIOD_ST_DEAD;
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	return (0);
}
280 
/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and by _sendall (via _addrq, _waitrq)
 *
 * Caller holds vc_sendlock and iod_rqlock (as reader).
 * Returns 0 when the request was sent or was completed
 * locally (e.g. with ENOTCONN); returns ENOTCONN only for
 * fatal conditions where no further sends should be tried.
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct proc *p = curproc;
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: requests with sr_flags & SMBR_INTERNAL
	 * need to pass here with these states:
	 *   SMBIOD_ST_TRANACTIVE: smb_negotiate
	 *   SMBIOD_ST_NEGOACTIVE: smb_ssnsetup
	 */
	SMBIODEBUG("vc_state = %d\n", vcp->vc_state);
	switch (vcp->vc_state) {
	case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_DEAD:
		/* This is what keeps the iod itself from sending more */
		smb_iod_rqprocessed(rqp, ENOTCONN, 0);
		return (0);
	case SMBIOD_ST_RECONNECT:
		/* Leave the request queued; it may go after reconnect. */
		return (0);
	default:
		break;
	}

	/* First send attempt: stamp the UID and TID into the header. */
	if (rqp->sr_sendcnt == 0) {

		*rqp->sr_rquid = htoles(vcp->vc_smbuid);

		/*
		 * XXX: Odd place for all this...
		 * Would expect these values in vc_smbuid
		 * and/or the request before we get here.
		 * I think most of this mess is due to having
		 * the initial UID set to SMB_UID_UKNOWN when
		 * it should have been initialized to zero!
		 * REVIST this later. XXX -gwr
		 *
		 * This is checking for the case where
		 * "vc_smbuid" was set to 0 in "smb_smb_ssnsetup()";
		 * that happens for requests that occur
		 * after that's done but before we get back the final
		 * session setup reply, where the latter is what
		 * gives us the UID.  (There can be an arbitrary # of
		 * session setup packet exchanges to complete
		 * "extended security" authentication.)
		 *
		 * However, if the server gave us a UID of 0 in a
		 * Session Setup andX reply, and we then do a
		 * Tree Connect andX and get back a TID, we should
		 * use that TID, not 0, in subsequent references to
		 * that tree (e.g., in NetShareEnum RAP requests).
		 *
		 * So, for now, we forcibly zero out the TID only if we're
		 * doing extended security, as that's the only time
		 * that "vc_smbuid" should be explicitly zeroed.
		 *
		 * note we must and do use SMB_TID_UNKNOWN for SMB_COM_ECHO
		 */
		if (!vcp->vc_smbuid &&
		    (vcp->vc_hflags2 & SMB_FLAGS2_EXT_SEC))
			*rqp->sr_rqtid = htoles(0);
		else
			*rqp->sr_rqtid =
			    htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * A copy is sent (not the original) so the request can be
	 * resent later if this attempt fails.
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	/* copymsg may fail (ENOBUFS); SEND consumes m either way. */
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
	m = 0; /* consumed by SEND */
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	return (0);
}
419 
420 static int
421 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
422 {
423 	struct proc *p = curproc;
424 	mblk_t *m;
425 	uchar_t *hp;
426 	int error;
427 
428 top:
429 	m = NULL;
430 	error = SMB_TRAN_RECV(vcp, &m, p);
431 	if (error == EAGAIN)
432 		goto top;
433 	if (error)
434 		return (error);
435 	ASSERT(m);
436 
437 	m = m_pullup(m, SMB_HDRLEN);
438 	if (m == NULL) {
439 		return (ENOSR);
440 	}
441 
442 	/*
443 	 * Check the SMB header
444 	 */
445 	hp = mtod(m, uchar_t *);
446 	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
447 		m_freem(m);
448 		return (EPROTO);
449 	}
450 
451 	*mpp = m;
452 	return (0);
453 }
454 
/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * ETIME from the recv drives the "server not responding"
 * warnings, periodic echo probes, and (when this thread holds
 * the last VC reference) connection teardown.
 */
static void
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = EIO;
			break;
		}

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			error = EIO;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		if ((error == ETIME) && vcp->iod_rqwaiting) {
			/*
			 * Nothing received for 15 seconds,
			 * and we have requests waiting.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				smb_iod_notify_down(vcp);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter. It's tricky to
			 * do a send from the IOD thread because
			 * we don't want to block here.
			 *
			 * Using tmo=SMBNOREPLYWAIT in the request
			 * so smb_rq_reply will skip smb_iod_waitrq.
			 * The smb_smb_echo call uses SMBR_INTERNAL
			 * to avoid calling smb_iod_sendall().
			 */
			if ((etime_count & 3) == 2) {
				smb_smb_echo(vcp, &vcp->vc_scred,
				    SMBNOREPLYWAIT);
			}

			continue;
		} /* ETIME && iod_rqwaiting */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, disconnect, release, terminate.
			 * Usually can avoid the lock/unlock here.
			 * Note, in-line: _vc_kill ... _vc_gone
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1 &&
			    (vcp->vc_flags & SMBV_GONE) == 0) {
				vcp->vc_flags |= SMBV_GONE;
				SMB_VC_UNLOCK(vcp);
				smb_iod_disconnect(vcp);
				continue; /* wait for ACK */
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
			break;
		}

		/*
		 * Received something.  Yea!
		 * Clear any pending "not responding" state.
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			smb_iod_notify_up(vcp);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = SMB_HDRMID(hp);
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		/* No matching request found; drop the reply. */
		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
/*			smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}
#ifdef APPLE
	/*
	 * check for interrupts
	 * On Solaris, handle in smb_iod_waitrq
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (smb_sigintr(rqp->sr_cred->scr_vfsctx))
			smb_iod_rqprocessed(rqp, EINTR, 0);
	}
	rw_exit(&vcp->iod_rqlock);
#endif
}
631 
/*
 * Looks like we don't need these callbacks,
 * but keep the code for now (for Apple).
 *
 * Notify each share on this VC that the server has stopped
 * responding.  No-op unless the smbfs callback vector is
 * compiled in and registered.
 */
/*ARGSUSED*/
void
smb_iod_notify_down(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *co;

	if (fscb == NULL)
		return;

	/*
	 * Walk the share list, notify...
	 * Was: smbfs_down(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_down() */
		fscb->fscb_down(CPTOSS(co));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
659 
/*
 * Notify each share on this VC that the server is responding
 * again.  No-op unless the smbfs callback vector is compiled
 * in and registered.
 */
/*ARGSUSED*/
void
smb_iod_notify_up(struct smb_vc *vcp)
{
#ifdef NEED_SMBFS_CALLBACKS
	struct smb_connobj *co;

	if (fscb == NULL)
		return;

	/*
	 * Walk the share list, notify...
	 * Was: smbfs_up(...share->ss_mount);
	 * XXX: Ok to hold vc_lock here?
	 */
	SMB_VC_LOCK(vcp);
	SLIST_FOREACH(co, &(VCTOCP(vcp)->co_children), co_next) {
		/* smbfs_up() */
		fscb->fscb_up(CPTOSS(co));
	}
	SMB_VC_UNLOCK(vcp);
#endif /* NEED_SMBFS_CALLBACKS */
}
683 
684 /*
685  * The IOD thread is now just a "reader",
686  * so no more smb_iod_request().  Yea!
687  */
688 
/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 *
 * Internal requests (SMBR_INTERNAL) are queued at the head and
 * sent directly from this thread; normal requests go on the
 * tail and are sent via smb_iod_sendall.  Returns 0 or an
 * errno; on error an internal request is removed from the list.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);

	ASSERT(rqp->sr_cred);

	/* This helps a little with debugging. */
	rqp->sr_owner = curthread;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		/*
		 * This is some kind of internal request,
		 * i.e. negotiate, session setup, echo...
		 * Allow vc_state < SMBIOD_ST_VCACTIVE, and
		 * always send directly from this thread.
		 * May be called by the IOD thread (echo).
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (curthread == vcp->iod_thr) {
			if (sema_tryp(&vcp->vc_sendlock) == 0) {
				SMBIODEBUG("sendlock busy\n");
				error = EAGAIN;
			} else {
				/* Have vc_sendlock */
				error = smb_iod_sendrq(rqp);
				sema_v(&vcp->vc_sendlock);
			}
		} else {
			sema_p(&vcp->vc_sendlock);
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	/*
	 * Normal request from the driver or smbfs.
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
777 
/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 *
 * The request must already be on iod_rqlist and must not be
 * SMBR_INTERNAL.  Returns 0, EINVAL, or ENOTCONN.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}
821 
822 
/*
 * Remove a request from the VC's request list.
 * Called with no locks held.  Always returns 0.
 */
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	SMBIODEBUG("entry, mid=%d\n", rqp->sr_mid);

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);

	return (0);
}
844 
845 
/*
 * Internal version of smb_iod_waitrq.
 *
 * This is used when there is no reader thread,
 * so we have to do the recv here.  The request
 * must have the SMBR_INTERNAL flag set.
 *
 * Receives messages until one matches this request (by MID,
 * or by command as a fallback), installs it as the response,
 * and returns 0; otherwise returns the recv error.
 */
static int
smb_iod_waitrq_internal(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	uchar_t *hp;
	int error;
	uint16_t mid;
	uint8_t cmd;

	/* Make sure it's an internal request. */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0) {
		SMBIODEBUG("not internal\n");
		return (EINVAL);
	}

	/* Only simple requests allowed. */
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		SMBIODEBUG("multipacket\n");
		return (EINVAL);
	}

	/* Should not already have a response. */
	if (rqp->sr_rp.md_top) {
		DEBUG_ENTER("smb_iod_waitrq again?\n");
		return (0);
	}

	/*
	 * The message recv loop.  Terminates when we
	 * receive the message we're looking for.
	 * Drop others, with complaints.
	 * Scaled-down version of smb_iod_recvall
	 */
	for (;;) {
		m = NULL;
		error = smb_iod_recv1(vcp, &m);
		if (error) {
			/*
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 */
#if 0
			if (SMB_TRAN_FATAL(vcp, error)) {
				return (error);
			}
			continue;
#endif
			return (error);
		}

		hp = mtod(m, uchar_t *);
		cmd = SMB_HDRCMD(hp);
		/*LINTED*/
		mid = SMB_HDRMID(hp);

		SMBIODEBUG("cmd 0x%02x mid %04x\n",
		    (uint_t)cmd, (uint_t)mid);
		m_dumpm(m);

		/*
		 * Normally, the MID will match.
		 * For internal requests, also
		 * match on the cmd to be safe.
		 */
		if (mid == rqp->sr_mid)
			break;
		if (cmd == rqp->sr_cmd) {
			SMBIODEBUG("cmd match but not mid!\n");
			break;
		}

		SMBIODEBUG("drop nomatch\n");
		m_freem(m);
	}

	/*
	 * Have the response we were waiting for.
	 * Simplified version of the code from
	 * smb_iod_recvall
	 */
	SMBRQ_LOCK(rqp);
	if (rqp->sr_rp.md_top == NULL) {
		md_initm(&rqp->sr_rp, m);
	} else {
		SMBIODEBUG("drop duplicate\n");
		m_freem(m);
	}
	SMBRQ_UNLOCK(rqp);

	return (0);
}
945 
946 
/*
 * Wait for a request to complete.
 *
 * For internal requests, see smb_iod_waitrq_internal.
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns 0 (with sr_lerror consumed by the caller), EINTR
 * when an interruptible wait was signaled, or ETIME when the
 * per-request timeout (sr_timo) expired.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	SMBIODEBUG("entry, cmd=0x%02x mid=0x%04x\n",
	    (uint_t)rqp->sr_cmd, (uint_t)rqp->sr_mid);

	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		error = smb_iod_waitrq_internal(rqp);
		smb_iod_removerq(rqp);
		return (error);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will always timeout.
	 */
	ASSERT(curthread != vcp->iod_thr);

	atomic_inc_uint(&vcp->iod_rqwaiting);
	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			/* cv_wait_sig returned 0: interrupted */
			SMBIODEBUG("EINTR in sendwait, mid=%u\n", rqp->sr_mid);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = lbolt + SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;
	tmo2 = lbolt + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a succesful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		/* tr < 0 means the "notice" timeout expired. */
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);
	atomic_dec_uint(&vcp->iod_rqwaiting);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}
1112 
/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.  Requests already in
	 * the NOTIFIED state are left alone; the rest are completed
	 * with EIO so their waiters return.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}
1136 
/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 *
 * At most vc_maxmux requests are kept outstanding; when the
 * limit is reached, iod_muxfull is set so smb_iod_waitrq will
 * call back here as requests complete.  A fatal send error
 * (ENOTCONN) tears down the connection via smb_iod_dead.
 */
static void
smb_iod_sendall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	int error, save_newrq, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/* Expect to find about this many requests. */
	SMBIODEBUG("top, save_newrq=%d\n", save_newrq);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state == SMBIOD_ST_DEAD) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);

	if (error == ENOTCONN)
		smb_iod_dead(vcp);

}
1214 
1215 
1216 /*
1217  * "main" function for smbiod daemon thread
1218  */
1219 void
1220 smb_iod_main(struct smb_vc *vcp)
1221 {
1222 	kthread_t *thr = curthread;
1223 
1224 	SMBIODEBUG("entry\n");
1225 
1226 	SMBIODEBUG("Running, thr=0x%p\n", thr);
1227 
1228 	/*
1229 	 * Prevent race with thread that created us.
1230 	 * After we get this lock iod_thr is set.
1231 	 */
1232 	SMB_VC_LOCK(vcp);
1233 	ASSERT(thr == vcp->iod_thr);
1234 
1235 	/* Redundant with iod_thr, but may help debugging. */
1236 	vcp->iod_flags |= SMBIOD_RUNNING;
1237 	SMB_VC_UNLOCK(vcp);
1238 
1239 	/*
1240 	 * OK, this is a new reader thread.
1241 	 * In case of reconnect, tell any
1242 	 * old requests they can restart.
1243 	 */
1244 	smb_iod_invrq(vcp);
1245 
1246 	/*
1247 	 * Run the "reader" loop.
1248 	 */
1249 	smb_iod_recvall(vcp);
1250 
1251 	/*
1252 	 * The reader loop function returns only when
1253 	 * there's been a fatal error on the connection.
1254 	 */
1255 	smb_iod_dead(vcp);
1256 
1257 	/*
1258 	 * The reader thread is going away.  Clear iod_thr,
1259 	 * and wake up anybody waiting for us to quit.
1260 	 */
1261 	SMB_VC_LOCK(vcp);
1262 	vcp->iod_flags &= ~SMBIOD_RUNNING;
1263 	vcp->iod_thr = NULL;
1264 	cv_broadcast(&vcp->iod_exit);
1265 	SMB_VC_UNLOCK(vcp);
1266 
1267 	/*
1268 	 * This hold was taken in smb_iod_create()
1269 	 * when this thread was created.
1270 	 */
1271 	smb_vc_rele(vcp);
1272 
1273 	SMBIODEBUG("Exiting, p=0x%p\n", curproc);
1274 	zthread_exit();
1275 }
1276 
1277 /*
1278  * Create the reader thread.
1279  *
1280  * This happens when we are just about to
1281  * enter vc_state = SMBIOD_ST_VCACTIVE;
1282  * See smb_sm_ssnsetup()
1283  */
1284 int
1285 smb_iod_create(struct smb_vc *vcp)
1286 {
1287 	kthread_t *thr = NULL;
1288 	int error;
1289 
1290 	/*
1291 	 * Take a hold on the VC for the IOD thread.
1292 	 * This hold will be released when the IOD
1293 	 * thread terminates. (or on error below)
1294 	 */
1295 	smb_vc_hold(vcp);
1296 
1297 	SMB_VC_LOCK(vcp);
1298 
1299 	if (vcp->iod_thr != NULL) {
1300 		SMBIODEBUG("aready have an IOD?");
1301 		error = EIO;
1302 		goto out;
1303 	}
1304 
1305 	/*
1306 	 * Darwin code used: IOCreateThread(...)
1307 	 * In Solaris, we use...
1308 	 */
1309 	thr = zthread_create(
1310 	    NULL,	/* stack */
1311 	    0, /* stack size (default) */
1312 	    smb_iod_main, /* entry func... */
1313 	    vcp, /* ... and arg */
1314 	    0, /* len (of what?) */
1315 	    minclsyspri); /* priority */
1316 	if (thr == NULL) {
1317 		SMBERROR("can't start smbiod\n");
1318 		error = ENOMEM;
1319 		goto out;
1320 	}
1321 
1322 	/* Success! */
1323 	error = 0;
1324 	vcp->iod_thr = thr;
1325 
1326 out:
1327 	SMB_VC_UNLOCK(vcp);
1328 
1329 	if (error)
1330 		smb_vc_rele(vcp);
1331 
1332 	return (error);
1333 }
1334 
1335 /*
1336  * Called from smb_vc_free to do any
1337  * cleanup of our IOD (reader) thread.
1338  */
1339 int
1340 smb_iod_destroy(struct smb_vc *vcp)
1341 {
1342 	clock_t tmo;
1343 
1344 	/*
1345 	 * Let's try to make sure the IOD thread
1346 	 * goes away, by waiting for it to exit.
1347 	 * Normally, it's gone by now.
1348 	 *
1349 	 * Only wait for a second, because we're in the
1350 	 * teardown path and don't want to get stuck here.
1351 	 * Should not take long, or things are hosed...
1352 	 */
1353 	SMB_VC_LOCK(vcp);
1354 	if (vcp->iod_thr) {
1355 		vcp->iod_flags |= SMBIOD_SHUTDOWN;
1356 		tmo = lbolt + hz;
1357 		tmo = cv_timedwait(&vcp->iod_exit, &vcp->vc_lock, tmo);
1358 		if (tmo == -1) {
1359 			SMBERROR("IOD thread for %s did not exit?\n",
1360 			    vcp->vc_srvname);
1361 		}
1362 	}
1363 	if (vcp->iod_thr) {
1364 		/* This should not happen. */
1365 		SMBIODEBUG("IOD thread did not exit!\n");
1366 		/* Try harder? */
1367 		tsignal(vcp->iod_thr, SIGKILL);
1368 	}
1369 	SMB_VC_UNLOCK(vcp);
1370 
1371 	return (0);
1372 }
1373