/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Portions Copyright (C) 2001 - 2013 Apple Inc. All rights reserved.
 * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
 */

#ifdef DEBUG
/* See sys/queue.h */
#define	QUEUEDEBUG 1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/time.h>
#include <sys/class.h>
#include <sys/disp.h>
#include <sys/cmn_err.h>
#include <sys/zone.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb2.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb2_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

/*
 * SMB messages are up to 64K.  Let's leave room for two.
 * If we negotiate up to SMB2, increase these. XXX todo
 */
static int smb_tcpsndbuf = 0x20000;
static int smb_tcprcvbuf = 0x20000;
static int smb_connect_timeout = 10; /* seconds */
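
/*
 * Note: these are plain global ints, so they can be tuned
 * without rebuilding, e.g. from /etc/system (example only;
 * assumes this file builds into the "nsmb" driver module):
 *	set nsmb:smb_connect_timeout = 30
 */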

static int smb1_iod_process(smb_vc_t *, mblk_t *);
static int smb2_iod_process(smb_vc_t *, mblk_t *);
static int smb_iod_send_echo(smb_vc_t *, cred_t *cr);
static int smb_iod_logoff(struct smb_vc *vcp, cred_t *cr);

/*
 * This is set/cleared when smbfs loads/unloads.
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;
void
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
}
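
/*
 * Example usage (illustrative; the registration itself lives in
 * smbfs, not here): smbfs passes its callback vector at module
 * load and clears it at unload:
 *	smb_fscb_set(&smbfs_fscb);	-- at load
 *	smb_fscb_set(NULL);		-- at unload
 */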

static void
smb_iod_share_disconnected(smb_share_t *ssp)
{

	smb_share_invalidate(ssp);

	/*
	 * This is the only fscb hook smbfs currently uses.
	 * Replaces smbfs_dead() from Darwin.
	 */
	if (fscb && fscb->fscb_disconn) {
		fscb->fscb_disconn(ssp);
	}
}

/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}
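
/*
 * Example (illustrative): watch the transitions from user land
 * with the dtrace fbt provider, where arg1 is the new state:
 *	dtrace -n 'fbt::smb_iod_newstate:entry { trace(arg1); }'
 */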

/* Lock-held version of smb_iod_rqprocessed() below. */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}

static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}

static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection.
	 * Also wake up any iod_muxwant waiters.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
	cv_broadcast(&vcp->iod_muxwait);
}

/*
 * Called by smb_vc_rele/smb_vc_kill on last ref, and by
 * the driver close function if the IOD closes its minor.
 * In those cases, the caller should be the IOD thread.
 *
 * Forcibly kill the connection.
 */
void
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	SMB_TRAN_DISCONNECT(vcp);
}

/*
 * Send one request.
 *
 * SMB1 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *   smb_iod_rqprocessed(rqp, ...)
 */
static void
smb1_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it.
	 */
	smb_rq_fillhdr(rqp);
	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		smb_rq_sign(rqp);
	}

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 */
	m = dupmsg(rqp->sr_rq.mb_top);
	if (m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}

#ifdef DTRACE_PROBE2
	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#endif

	error = SMB_TRAN_SEND(vcp, m);
	m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Send one request.
 *
 * SMB2 only
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 * Errors are reported via the smb_rq, using:
 *   smb_iod_rqprocessed(rqp, ...)
 */
static void
smb2_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_rq *c_rqp;	/* compound */
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *top_m;
	mblk_t *cur_m;
	int error;

	ASSERT(vcp);
	ASSERT(RW_WRITE_HELD(&vcp->iod_rqlock));
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Internal requests are allowed in any state;
	 * otherwise should be active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
		return;
	}

	/*
	 * Overwrite the SMB header with the assigned MID and
	 * (if we're signing) sign it.  If there are compounded
	 * requests after the top one, do those too.
	 */
	smb2_rq_fillhdr(rqp);
	if (rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
		smb2_rq_sign(rqp);
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		smb2_rq_fillhdr(c_rqp);
		if (c_rqp->sr2_rqflags & SMB2_FLAGS_SIGNED) {
			smb2_rq_sign(c_rqp);
		}
		c_rqp = c_rqp->sr2_compound_next;
	}

	/*
	 * The transport send consumes the message and we'd
	 * prefer to keep a copy, so dupmsg() before sending.
	 * We also need this to build the compound message
	 * that we'll actually send.  The message offset at
	 * the start of each compounded message should be
	 * eight-byte aligned.  The caller preparing the
	 * compounded request has to take care of that
	 * before we get here and sign messages etc.
	 */
	top_m = dupmsg(rqp->sr_rq.mb_top);
	if (top_m == NULL) {
		error = ENOBUFS;
		goto fatal;
	}
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		size_t len = msgdsize(top_m);
		ASSERT((len & 7) == 0);
		cur_m = dupmsg(c_rqp->sr_rq.mb_top);
		if (cur_m == NULL) {
			freemsg(top_m);
			error = ENOBUFS;
			goto fatal;
		}
		linkb(top_m, cur_m);
		c_rqp = c_rqp->sr2_compound_next;
	}

	DTRACE_PROBE2(iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), top_m);

	error = SMB_TRAN_SEND(vcp, top_m);
	top_m = 0; /* consumed by SEND */

	rqp->sr_lerror = error;
	if (error == 0) {
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		SMBRQ_UNLOCK(rqp);
		return;
	}
	/*
	 * Transport send returned an error.
	 * Was it a fatal one?
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
	fatal:
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		smb_iod_rqprocessed(rqp, error, SMBR_RESTART);
		return;
	}
}

/*
 * Receive one NetBIOS (or NBT over TCP) message.  If none have arrived,
 * wait up to SMB_NBTIMO (15 sec.) for one to arrive, and then if still
 * none have arrived, return ETIME.
 */
static int
smb_iod_recvmsg(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	int error;

top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m != NULL);

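	/*
	 * Pull up the first 4 bytes (the SMB/SMB2 signature) so
	 * callers can inspect a contiguous header.
	 */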
	m = m_pullup(m, 4);
	if (m == NULL) {
		return (ENOSR);
	}

	*mpp = m;
	return (0);
}

/*
 * How long should we keep around an unused VC (connection)?
 * There's usually a good chance connections will be reused,
 * so the default is to keep such connections for 5 min.
 */
#ifdef	DEBUG
int smb_iod_idle_keep_time = 60;	/* seconds */
#else
int smb_iod_idle_keep_time = 300;	/* seconds */
#endif
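
/*
 * The shorter DEBUG value is presumably there so the idle
 * teardown path is easier to exercise during development.
 */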

/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread.  Normally we're in
 * state SMBIOD_ST_VCACTIVE here, but during reconnect we're called in
 * other states with poll==TRUE.
 *
 * A non-zero error return here causes the IOD work loop to terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp, boolean_t poll)
{
	mblk_t *m;
	int error = 0;
	int etime_idle = 0;	/* How many 15 sec. "ticks" idle. */
	int etime_count = 0;	/* ... and when we have requests. */

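	/*
	 * Each SMB_TRAN_RECV timeout below is one 15 sec. "tick":
	 * with requests pending, the first tick logs "not
	 * responding", the second sends an echo, and the third
	 * gives up and starts a reconnect.
	 */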
	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */
		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recvmsg(vcp, &m);

		/*
		 * Internal requests (reconnecting) call this in a loop
		 * (with poll==TRUE) until the request completes.
		 */
		if (error == ETIME && poll)
			break;

		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {

			/*
			 * Nothing received and requests waiting.
			 * Increment etime_count.  If we were idle,
			 * skip the 1st tick, because we started
			 * waiting before there were any requests.
			 */
			if (etime_idle != 0) {
				etime_idle = 0;
			} else if (etime_count < INT16_MAX) {
				etime_count++;
			}

			/*
			 * ETIME and requests in the queue.
			 * The first time (at 15 sec.),
			 * log an error (just once).
			 */
			if (etime_count > 0 &&
			    vcp->iod_noresp == B_FALSE) {
				vcp->iod_noresp = B_TRUE;
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}
			/*
			 * At 30 sec. try sending an echo, which
			 * should cause some response.
			 */
			if (etime_count == 2) {
				SMBIODEBUG("send echo\n");
				(void) smb_iod_send_echo(vcp, CRED());
			}
			/*
			 * At 45 sec. give up on the connection
			 * and try to reconnect.
			 */
			if (etime_count == 3) {
				SMB_VC_LOCK(vcp);
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
				SMB_VC_UNLOCK(vcp);
				SMB_TRAN_DISCONNECT(vcp);
				break;
			}
			continue;
		} /* ETIME and requests in the queue */

		if (error == ETIME) {
			/*
			 * Nothing received and no active requests.
			 *
			 * If we've received nothing from the server for
			 * smb_iod_idle_keep_time seconds, and the IOD
			 * thread holds the last reference to this VC,
			 * move to state IDLE and drop the TCP session.
			 * The IDLE handler will destroy the VC unless
			 * vc_state goes to RECONNECT before then.
			 */
			etime_count = 0;
			if (etime_idle < INT16_MAX)
				etime_idle++;
			if ((etime_idle * SMB_NBTIMO) <
			    smb_iod_idle_keep_time)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
				SMB_VC_UNLOCK(vcp);
				SMBIODEBUG("logoff & disconnect\n");
				(void) smb_iod_logoff(vcp, CRED());
				SMB_TRAN_DISCONNECT(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv above returned an error indicating
			 * that our TCP session is no longer usable.
			 * Disconnect the session and get ready to
			 * reconnect.  If we have pending requests,
			 * move to state reconnect immediately;
			 * otherwise move to state IDLE until a
			 * request is issued on this VC.
			 */
			SMB_VC_LOCK(vcp);
			if (vcp->iod_rqlist.tqh_first != NULL)
				smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
			else
				smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			SMB_TRAN_DISCONNECT(vcp);
			break;
		}

		/*
		 * Received something.  Yea!
		 */
		etime_count = 0;
		etime_idle = 0;

		/*
		 * If we just completed a reconnect after logging
		 * "SMB server %s not responding" then log OK now.
		 */
		if (vcp->iod_noresp) {
			vcp->iod_noresp = B_FALSE;
			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);
		}

		if ((vcp->vc_flags & SMBV_SMB2) != 0) {
			error = smb2_iod_process(vcp, m);
		} else {
			error = smb1_iod_process(vcp, m);
		}

		/*
		 * Reconnect calls this in a loop with poll==TRUE.
		 * We've received a response, so break now.
		 */
		if (poll) {
			error = 0;
			break;
		}
	}

	return (error);
}

/*
 * Have what should be an SMB1 reply.  Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * Returns an error if the reader should give up.
 * To be safe, error if we read garbage.
 */
static int
smb1_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t cmd, sig[4];
	uint16_t mid;
	int err, skip;

	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header version and get the MID.
	 *
	 * The header version should be SMB1 except when we're
	 * doing SMB1-to-SMB2 negotiation, in which case we may
	 * see an SMB2 header with message ID=0 (only allowed in
	 * vc_state == SMBIOD_ST_CONNECTED -- negotiating).
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V1:	/* SMB1 */
		md_get_uint8(&md, &cmd);
		/* Skip to and get the MID. At offset 5 now. */
		skip = SMB_HDR_OFF_MID - 5;
		md_get_mem(&md, NULL, skip, MB_MSYSTEM);
		err = md_get_uint16le(&md, &mid);
		if (err)
			return (err);
		break;
	case SMB_HDR_V2:	/* SMB2+ */
		if (vcp->vc_state == SMBIOD_ST_CONNECTED) {
			/*
			 * No need to look, can only be
			 * MID=0, cmd=negotiate
			 */
			cmd = SMB_COM_NEGOTIATE;
			mid = 0;
			break;
		}
		/* FALLTHROUGH */
	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Find the request and post the reply.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr_mid != mid)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			if (rqp->sr_flags & SMBR_MULTIPACKET) {
				md_append_record(&rqp->sr_rp, m);
			} else {
				SMBRQ_UNLOCK(rqp);
				rqp = NULL;
				break;
			}
		}
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (cmd != SMB_COM_ECHO) {
			SMBSDEBUG("drop resp: MID 0x%04x\n", (uint_t)mid);
		}
		m_freem(m);
		/*
		 * Keep going.  It's possible this reply came
		 * after the request timed out and went away.
		 */
	}
	return (0);
}

/*
 * Have what should be an SMB2 reply.  Check and parse the header,
 * then use the message ID to find the request this belongs to and
 * post it on that request.
 *
 * We also want to apply any credit grant in this reply now,
 * rather than waiting for the owner to wake up.
 */
static int
smb2_iod_process(smb_vc_t *vcp, mblk_t *m)
{
	struct mdchain md;
	struct smb_rq *rqp;
	uint8_t sig[4];
	mblk_t *next_m = NULL;
	uint64_t message_id, async_id;
	uint32_t flags, next_cmd_off, status;
	uint16_t command, credits_granted;
	int err;

top:
	m = m_pullup(m, SMB2_HDRLEN);
	if (m == NULL)
		return (ENOMEM);

	/*
	 * Note: Intentionally do NOT md_done(&md)
	 * because that would free the message and
	 * we just want to peek here.
	 */
	md_initm(&md, m);

	/*
	 * Check the SMB header.  Must be SMB2
	 * (and later, could be SMB3 encrypted)
	 */
	err = md_get_mem(&md, sig, 4, MB_MSYSTEM);
	if (err)
		return (err);
	if (sig[1] != 'S' || sig[2] != 'M' || sig[3] != 'B') {
		goto bad_hdr;
	}
	switch (sig[0]) {
	case SMB_HDR_V2:
		break;
	case SMB_HDR_V3E:
		/*
		 * Todo: If encryption enabled, decrypt the message
		 * and restart processing on the cleartext.
		 */
		/* FALLTHROUGH */
	bad_hdr:
	default:
		SMBIODEBUG("Bad SMB2 hdr\n");
		m_freem(m);
		return (EPROTO);
	}

	/*
	 * Parse the rest of the SMB2 header,
	 * skipping what we don't need.
	 */
	md_get_uint32le(&md, NULL);	/* length, credit_charge */
	md_get_uint32le(&md, &status);
	md_get_uint16le(&md, &command);
	md_get_uint16le(&md, &credits_granted);
	md_get_uint32le(&md, &flags);
	md_get_uint32le(&md, &next_cmd_off);
	md_get_uint64le(&md, &message_id);
	if (flags & SMB2_FLAGS_ASYNC_COMMAND) {
		md_get_uint64le(&md, &async_id);
	} else {
		/* PID, TID (not needed) */
		async_id = 0;
	}

	/*
	 * If this is a compound reply, split it.
	 * Next must be 8-byte aligned.
	 */
	if (next_cmd_off != 0) {
		if ((next_cmd_off & 7) != 0)
			SMBIODEBUG("Misaligned next cmd\n");
		else
			next_m = m_split(m, next_cmd_off, 1);
	}

	/*
	 * SMB2 Negotiate may return zero credits_granted,
	 * in which case we should assume it granted one.
	 */
	if (command == SMB2_NEGOTIATE && credits_granted == 0)
		credits_granted = 1;

	/*
	 * Apply the credit grant.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	vcp->vc2_limit_message_id += credits_granted;

	/*
	 * Find the request and post the reply.
	 */
	rw_downgrade(&vcp->iod_rqlock);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (rqp->sr2_messageid != message_id)
			continue;

		DTRACE_PROBE2(iod_post_reply,
		    (smb_rq_t *), rqp, (mblk_t *), m);
		m_dumpm(m);

		/*
		 * If this is an interim response, just save the
		 * async ID but don't wake up the request.
		 * Don't need SMBRQ_LOCK for this.
		 */
		if (status == NT_STATUS_PENDING && async_id != 0) {
			rqp->sr2_rspasyncid = async_id;
			m_freem(m);
			break;
		}

		SMBRQ_LOCK(rqp);
		if (rqp->sr_rp.md_top == NULL) {
			md_initm(&rqp->sr_rp, m);
		} else {
			SMBRQ_UNLOCK(rqp);
			rqp = NULL;
			break;
		}
		smb_iod_rqprocessed_LH(rqp, 0, 0);
		SMBRQ_UNLOCK(rqp);
		break;
	}
	rw_exit(&vcp->iod_rqlock);

	if (rqp == NULL) {
		if (command != SMB2_ECHO) {
			SMBSDEBUG("drop resp: MID %lld\n",
			    (long long)message_id);
		}
		m_freem(m);
		/*
		 * Keep going.  It's possible this reply came
		 * after the request timed out and went away.
		 */
	}

	/*
	 * If we split a compound reply, continue with the
	 * next part of the compound.
	 */
	if (next_m != NULL) {
		m = next_m;
		goto top;
	}

	return (0);
}

/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while.  Try to
 * send an SMB echo request.  It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
static int
smb_iod_send_echo(smb_vc_t *vcp, cred_t *cr)
{
	smb_cred_t scred;
	int err, tmo = SMBNOREPLYWAIT;

	ASSERT(vcp->iod_thr == curthread);

	smb_credinit(&scred, cr);
	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
		err = smb2_smb_echo(vcp, &scred, tmo);
	} else {
		err = smb_smb_echo(vcp, &scred, tmo);
	}
	smb_credrele(&scred);
	return (err);
}

/*
 * Helper for smb1_iod_addrq, smb2_iod_addrq
 * Returns zero if interrupted, else 1.
 */
static int
smb_iod_muxwait(smb_vc_t *vcp, boolean_t sig_ok)
{
	int rc;

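	/*
	 * Note: cv_wait_sig() returns zero when interrupted by
	 * a signal, so a zero return from this helper means
	 * "interrupted".
	 */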
	SMB_VC_LOCK(vcp);
	vcp->iod_muxwant++;
	if (sig_ok) {
		rc = cv_wait_sig(&vcp->iod_muxwait, &vcp->vc_lock);
	} else {
		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
		rc = 1;
	}
	vcp->iod_muxwant--;
	SMB_VC_UNLOCK(vcp);

	return (rc);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB1 only
 *
 * The logic for how we limit active requests differs between
 * SMB1 and SMB2.  With SMB1 it's a simple counter iod_muxcnt.
 */
int
smb1_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	uint16_t need;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred);
	ASSERT((vcp->vc_flags & SMBV_SMB2) == 0);

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
	need = 1;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		need++;
	if ((vcp->iod_muxcnt + need) > vcp->vc_maxmux) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}

	/*
	 * Add this request to the active list and send it.
	 */
	rqp->sr_mid = vcp->vc_next_mid++;
	/* If signing, set the signing sequence numbers. */
	if (vcp->vc_mackey != NULL && (rqp->sr_rqflags2 &
	    SMB_FLAGS2_SECURITY_SIGNATURE) != 0) {
		rqp->sr_seqno = vcp->vc_next_seq++;
		rqp->sr_rseqno = vcp->vc_next_seq++;
	}
	vcp->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Place request in the queue, and send it.
 * Called with no locks held.
 *
 * Called for SMB2 only.
 *
 * With SMB2 we have a range of valid message IDs, and we may
 * only send requests when we can assign a message ID within
 * the valid range.  We may need to wait here for some active
 * request to finish (and update vc2_limit_message_id) before
 * we can get message IDs for our new request(s).  Another
 * difference is that the request sequence we're waiting to
 * add here may require multiple message IDs, due to either
 * compounding or multi-credit requests.  Therefore we need
 * to wait for the availability of however many message IDs
 * are required by our request sequence.
 */
int
smb2_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_rq *c_rqp;	/* compound req */
	uint16_t charge;
	boolean_t sig_ok =
	    (rqp->sr_flags & SMBR_NOINTR_SEND) == 0;

	ASSERT(rqp->sr_cred != NULL);
	ASSERT((vcp->vc_flags & SMBV_SMB2) != 0);

	/*
	 * Figure out the credit charges.
	 * No multi-credit messages yet.
	 */
	rqp->sr2_totalcreditcharge = rqp->sr2_creditcharge;
	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		rqp->sr2_totalcreditcharge += c_rqp->sr2_creditcharge;
		c_rqp = c_rqp->sr2_compound_next;
	}

	/*
	 * An internal request must not be compounded
	 * and should use exactly one credit.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		if (rqp->sr2_compound_next != NULL) {
			ASSERT(0);
			return (EINVAL);
		}
	}

	rqp->sr_owner = curthread;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

recheck:
	/*
	 * Internal requests can be added in any state,
	 * but normal requests only in state active.
	 */
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0 &&
	    vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		rw_exit(&vcp->iod_rqlock);
		return (ENOTCONN);
	}

	/*
	 * If we're at the limit of active requests, block until
	 * enough requests complete so we can make ours active.
	 * Wakeup in smb_iod_removerq().
	 *
	 * Normal callers leave one slot free, so internal
	 * callers can have the last slot if needed.
	 */
	charge = rqp->sr2_totalcreditcharge;
	if ((rqp->sr_flags & SMBR_INTERNAL) == 0)
		charge++;
	if ((vcp->vc2_next_message_id + charge) >
	    vcp->vc2_limit_message_id) {
		rw_exit(&vcp->iod_rqlock);
		if (rqp->sr_flags & SMBR_INTERNAL)
			return (EBUSY);
		if (smb_iod_muxwait(vcp, sig_ok) == 0)
			return (EINTR);
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		goto recheck;
	}

	/*
	 * Add this request to the active list and send it.
	 * For SMB2 we may have a sequence of compounded
	 * requests, in which case we must add them all.
	 * They're sent as a compound in smb2_iod_sendrq.
	 */

	rqp->sr2_messageid = vcp->vc2_next_message_id;
	vcp->vc2_next_message_id += rqp->sr2_creditcharge;
	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);

	c_rqp = rqp->sr2_compound_next;
	while (c_rqp != NULL) {
		c_rqp->sr2_messageid = vcp->vc2_next_message_id;
		vcp->vc2_next_message_id += c_rqp->sr2_creditcharge;
		TAILQ_INSERT_TAIL(&vcp->iod_rqlist, c_rqp, sr_link);
		c_rqp = c_rqp->sr2_compound_next;
	}
	smb2_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);
	return (0);
}

/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb1_iod_addrq.
 * Only used by SMB1
 */
int
smb1_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (vcp->vc_flags & SMBV_SMB2) {
		ASSERT("!SMB2?");
		return (EINVAL);
	}

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;
	smb1_iod_sendrq(rqp);

	rw_exit(&vcp->iod_rqlock);

	return (0);
}
11634bff34e3Sthurlow 
1164adee6784SGordon Ross /*
1165adee6784SGordon Ross  * Remove a request from the active list, and
1166adee6784SGordon Ross  * wake up requests waiting to go active.
1167adee6784SGordon Ross  *
1168adee6784SGordon Ross  * Shared by SMB1 + SMB2
1169adee6784SGordon Ross  *
1170adee6784SGordon Ross  * The logic for how we limit active requests differs between
1171adee6784SGordon Ross  * SMB1 and SMB2.  With SMB1 it's a simple counter ioc_muxcnt.
1172adee6784SGordon Ross  * With SMB2 we have a range of valid message IDs, and when we
1173adee6784SGordon Ross  * retire the oldest request we need to keep track of what is
1174adee6784SGordon Ross  * now the oldest message ID.  In both cases, after we take a
1175adee6784SGordon Ross  * request out of the list here, we should be able to wake up
1176adee6784SGordon Ross  * a request waiting to get in the active list.
1177adee6784SGordon Ross  */
117802d09e03SGordon Ross void
smb_iod_removerq(struct smb_rq * rqp)11794bff34e3Sthurlow smb_iod_removerq(struct smb_rq *rqp)
11804bff34e3Sthurlow {
1181adee6784SGordon Ross 	struct smb_rq *rqp2;
11824bff34e3Sthurlow 	struct smb_vc *vcp = rqp->sr_vc;
1183adee6784SGordon Ross 	boolean_t was_head = B_FALSE;
11844bff34e3Sthurlow 
11854bff34e3Sthurlow 	rw_enter(&vcp->iod_rqlock, RW_WRITER);
1186adee6784SGordon Ross 
11874bff34e3Sthurlow #ifdef QUEUEDEBUG
11884bff34e3Sthurlow 	/*
11894bff34e3Sthurlow 	 * Make sure we have not already removed it.
11904bff34e3Sthurlow 	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
11914bff34e3Sthurlow 	 * XXX: Don't like the constant 1 here...
11924bff34e3Sthurlow 	 */
11934bff34e3Sthurlow 	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
11944bff34e3Sthurlow #endif
1195adee6784SGordon Ross 
1196adee6784SGordon Ross 	if (TAILQ_FIRST(&vcp->iod_rqlist) == rqp)
1197adee6784SGordon Ross 		was_head = B_TRUE;
11984bff34e3Sthurlow 	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
1199adee6784SGordon Ross 	if (vcp->vc_flags & SMBV_SMB2) {
1200adee6784SGordon Ross 		rqp2 = TAILQ_FIRST(&vcp->iod_rqlist);
1201adee6784SGordon Ross 		if (was_head && rqp2 != NULL) {
1202adee6784SGordon Ross 			/* Do we still need this? */
1203adee6784SGordon Ross 			vcp->vc2_oldest_message_id =
1204adee6784SGordon Ross 			    rqp2->sr2_messageid;
1205adee6784SGordon Ross 		}
1206adee6784SGordon Ross 	} else {
1207adee6784SGordon Ross 		ASSERT(vcp->iod_muxcnt > 0);
1208adee6784SGordon Ross 		vcp->iod_muxcnt--;
1209adee6784SGordon Ross 	}
12104bff34e3Sthurlow 
1211adee6784SGordon Ross 	rw_exit(&vcp->iod_rqlock);
12124bff34e3Sthurlow 
1213adee6784SGordon Ross 	/*
1214adee6784SGordon Ross 	 * If there are requests waiting for "mux" slots,
1215adee6784SGordon Ross 	 * wake one.
1216adee6784SGordon Ross 	 */
1217adee6784SGordon Ross 	SMB_VC_LOCK(vcp);
1218adee6784SGordon Ross 	if (vcp->iod_muxwant != 0)
1219adee6784SGordon Ross 		cv_signal(&vcp->iod_muxwait);
1220adee6784SGordon Ross 	SMB_VC_UNLOCK(vcp);
1221adee6784SGordon Ross }
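
/*
 * Editorial sketch (not built): the wait side that pairs with the
 * cv_signal above.  A request that cannot go active yet, i.e. no
 * free mux slot (SMB1) or no message ID inside the valid window
 * (SMB2), bumps iod_muxwant and sleeps on iod_muxwait until a
 * retiring request wakes it.  The slot_free predicate stands in
 * for the real admission checks in the addrq functions.
 */
#if 0
static void
wait_for_slot(struct smb_vc *vcp, boolean_t (*slot_free)(struct smb_vc *))
{
	SMB_VC_LOCK(vcp);
	while (!slot_free(vcp)) {
		vcp->iod_muxwant++;
		cv_wait(&vcp->iod_muxwait, &vcp->vc_lock);
		vcp->iod_muxwant--;
	}
	SMB_VC_UNLOCK(vcp);
}
#endif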
12224bff34e3Sthurlow 
12234bff34e3Sthurlow /*
12244bff34e3Sthurlow  * Wait for a request to complete.
12254bff34e3Sthurlow  */
12264bff34e3Sthurlow int
12274bff34e3Sthurlow smb_iod_waitrq(struct smb_rq *rqp)
12284bff34e3Sthurlow {
12294bff34e3Sthurlow 	struct smb_vc *vcp = rqp->sr_vc;
12304bff34e3Sthurlow 	clock_t tr, tmo1, tmo2;
1231adee6784SGordon Ross 	int error;
12324bff34e3Sthurlow 
12334bff34e3Sthurlow 	if (rqp->sr_flags & SMBR_INTERNAL) {
1234adee6784SGordon Ross 		/* XXX - Do we ever take this path now? */
1235adee6784SGordon Ross 		return (smb_iod_waitrq_int(rqp));
12364bff34e3Sthurlow 	}
12374bff34e3Sthurlow 
12384bff34e3Sthurlow 	/*
12394bff34e3Sthurlow 	 * Make sure this is NOT the IOD thread,
1240613a2f6bSGordon Ross 	 * or the wait below will stop the reader.
12414bff34e3Sthurlow 	 */
12424bff34e3Sthurlow 	ASSERT(curthread != vcp->iod_thr);
12434bff34e3Sthurlow 
12444bff34e3Sthurlow 	SMBRQ_LOCK(rqp);
12454bff34e3Sthurlow 
12464bff34e3Sthurlow 	/*
12474bff34e3Sthurlow 	 * The request has been sent.  Now wait for the response,
12484bff34e3Sthurlow 	 * with the timeout specified for this request.
12494bff34e3Sthurlow 	 * Compute all the deadlines now, so we effectively
12504bff34e3Sthurlow 	 * start the timer(s) after the request is sent.
12514bff34e3Sthurlow 	 */
12524bff34e3Sthurlow 	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
1253d3d50737SRafael Vanoni 		tmo1 = SEC_TO_TICK(smb_timo_notice);
12544bff34e3Sthurlow 	else
12554bff34e3Sthurlow 		tmo1 = 0;
1256d3d50737SRafael Vanoni 	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);
12574bff34e3Sthurlow 
12584bff34e3Sthurlow 	/*
12594bff34e3Sthurlow 	 * As above, we don't want to allow interrupt for some
12604bff34e3Sthurlow 	 * requests like open, because we could miss a successful
12614bff34e3Sthurlow 	 * response and therefore "leak" a FID.  Such requests
12624bff34e3Sthurlow 	 * are marked SMBR_NOINTR_RECV to prevent that.
12634bff34e3Sthurlow 	 *
12644bff34e3Sthurlow 	 * If "slow server" warnings are enabled, wait first
12654bff34e3Sthurlow 	 * for the "notice" timeout, and warn if expired.
12664bff34e3Sthurlow 	 */
12674bff34e3Sthurlow 	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
12684bff34e3Sthurlow 		if (rqp->sr_flags & SMBR_NOINTR_RECV)
1269d3d50737SRafael Vanoni 			tr = cv_reltimedwait(&rqp->sr_cond,
1270d3d50737SRafael Vanoni 			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
12714bff34e3Sthurlow 		else
1272d3d50737SRafael Vanoni 			tr = cv_reltimedwait_sig(&rqp->sr_cond,
1273d3d50737SRafael Vanoni 			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
12744bff34e3Sthurlow 		if (tr == 0) {
12754bff34e3Sthurlow 			error = EINTR;
12764bff34e3Sthurlow 			goto out;
12774bff34e3Sthurlow 		}
12784bff34e3Sthurlow 		if (tr < 0) {
12794bff34e3Sthurlow 			DTRACE_PROBE1(smb_iod_waitrq1,
12804bff34e3Sthurlow 			    (smb_rq_t *), rqp);
12814bff34e3Sthurlow 		}
12824bff34e3Sthurlow 	}
12834bff34e3Sthurlow 
12844bff34e3Sthurlow 	/*
12854bff34e3Sthurlow 	 * Keep waiting until the tmo2 deadline expires.
12864bff34e3Sthurlow 	 */
12874bff34e3Sthurlow 	while (rqp->sr_rpgen == rqp->sr_rplast) {
12884bff34e3Sthurlow 		if (rqp->sr_flags & SMBR_NOINTR_RECV)
12894bff34e3Sthurlow 			tr = cv_timedwait(&rqp->sr_cond,
12904bff34e3Sthurlow 			    &rqp->sr_lock, tmo2);
12914bff34e3Sthurlow 		else
12924bff34e3Sthurlow 			tr = cv_timedwait_sig(&rqp->sr_cond,
12934bff34e3Sthurlow 			    &rqp->sr_lock, tmo2);
12944bff34e3Sthurlow 		if (tr == 0) {
12954bff34e3Sthurlow 			error = EINTR;
12964bff34e3Sthurlow 			goto out;
12974bff34e3Sthurlow 		}
12984bff34e3Sthurlow 		if (tr < 0) {
12994bff34e3Sthurlow 			DTRACE_PROBE1(smb_iod_waitrq2,
13004bff34e3Sthurlow 			    (smb_rq_t *), rqp);
13014bff34e3Sthurlow 			error = ETIME;
13024bff34e3Sthurlow 			goto out;
13034bff34e3Sthurlow 		}
13044bff34e3Sthurlow 		/* got wakeup */
13054bff34e3Sthurlow 	}
13064bff34e3Sthurlow 	error = rqp->sr_lerror;
13074bff34e3Sthurlow 	rqp->sr_rplast++;
13084bff34e3Sthurlow 
13094bff34e3Sthurlow out:
13104bff34e3Sthurlow 	SMBRQ_UNLOCK(rqp);
13114bff34e3Sthurlow 
13124bff34e3Sthurlow 	/*
13134bff34e3Sthurlow 	 * MULTIPACKET requests must stay in the list.
13144bff34e3Sthurlow 	 * They may need additional responses.
13154bff34e3Sthurlow 	 */
13164bff34e3Sthurlow 	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
13174bff34e3Sthurlow 		smb_iod_removerq(rqp);
13184bff34e3Sthurlow 
1319adee6784SGordon Ross 	return (error);
1320adee6784SGordon Ross }
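
/*
 * Editorial sketch (not built): the two-phase timeout used above,
 * reduced to its essentials.  Phase one waits only up to a short
 * "notice" timeout to warn about a slow server; phase two waits
 * against an absolute deadline computed before either wait began,
 * so the total never exceeds the request timeout.  The caller
 * holds mtx; *done stands in for the sr_rpgen/sr_rplast check,
 * and the interruptible (_sig) variants are omitted for brevity.
 */
#if 0
static int
two_phase_wait(kcondvar_t *cv, kmutex_t *mtx, int *done,
    int notice_sec, int total_sec)
{
	clock_t deadline = ddi_get_lbolt() + SEC_TO_TICK(total_sec);

	/* Phase 1: short relative wait; on expiry just complain. */
	if (notice_sec > 0 && !*done) {
		if (cv_reltimedwait(cv, mtx, SEC_TO_TICK(notice_sec),
		    TR_CLOCK_TICK) < 0)
			cmn_err(CE_NOTE, "server is slow to respond");
	}

	/* Phase 2: keep waiting until the absolute deadline. */
	while (!*done) {
		if (cv_timedwait(cv, mtx, deadline) < 0)
			return (ETIME);	/* deadline passed */
	}
	return (0);
}
#endif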
1321adee6784SGordon Ross 
1322adee6784SGordon Ross /*
1323adee6784SGordon Ross  * Internal variant of smb_iod_waitrq(), for use in
1324adee6784SGordon Ross  * requests run by the IOD (reader) thread itself.
1325adee6784SGordon Ross  * Block only long enough to receive one reply.
1326adee6784SGordon Ross  */
1327adee6784SGordon Ross int
1328adee6784SGordon Ross smb_iod_waitrq_int(struct smb_rq *rqp)
1329adee6784SGordon Ross {
1330adee6784SGordon Ross 	struct smb_vc *vcp = rqp->sr_vc;
1331adee6784SGordon Ross 	int timeleft = rqp->sr_timo;
1332adee6784SGordon Ross 	int error;
1333adee6784SGordon Ross 
1334adee6784SGordon Ross 	ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
1335adee6784SGordon Ross again:
1336adee6784SGordon Ross 	error = smb_iod_recvall(vcp, B_TRUE);
1337adee6784SGordon Ross 	if (error == ETIME) {
1338adee6784SGordon Ross 		/* We waited SMB_NBTIMO sec. */
1339adee6784SGordon Ross 		timeleft -= SMB_NBTIMO;
1340adee6784SGordon Ross 		if (timeleft > 0)
1341adee6784SGordon Ross 			goto again;
1342adee6784SGordon Ross 	}
1343adee6784SGordon Ross 
1344adee6784SGordon Ross 	smb_iod_removerq(rqp);
1345adee6784SGordon Ross 	if (rqp->sr_state != SMBRQ_NOTIFIED)
1346adee6784SGordon Ross 		error = ETIME;
13474bff34e3Sthurlow 
13484bff34e3Sthurlow 	return (error);
13494bff34e3Sthurlow }
13504bff34e3Sthurlow 
13514bff34e3Sthurlow /*
13524bff34e3Sthurlow  * Shut down all outstanding I/O requests on the specified share with
13534bff34e3Sthurlow  * EIO; used when unmounting a share.  (There shouldn't be any for a
13544bff34e3Sthurlow  * non-forced unmount; if this is a forced unmount, we have to shut
13554bff34e3Sthurlow  * down the requests as part of the unmount process.)
13564bff34e3Sthurlow  */
13574bff34e3Sthurlow void
13584bff34e3Sthurlow smb_iod_shutdown_share(struct smb_share *ssp)
13594bff34e3Sthurlow {
13604bff34e3Sthurlow 	struct smb_vc *vcp = SSTOVC(ssp);
13614bff34e3Sthurlow 	struct smb_rq *rqp;
13624bff34e3Sthurlow 
13634bff34e3Sthurlow 	/*
13644bff34e3Sthurlow 	 * Loop through the list of requests and shutdown the ones
13654bff34e3Sthurlow 	 * that are for the specified share.
13664bff34e3Sthurlow 	 */
13674bff34e3Sthurlow 	rw_enter(&vcp->iod_rqlock, RW_READER);
13684bff34e3Sthurlow 	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
13694bff34e3Sthurlow 		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
13704bff34e3Sthurlow 			smb_iod_rqprocessed(rqp, EIO, 0);
13714bff34e3Sthurlow 	}
13724bff34e3Sthurlow 	rw_exit(&vcp->iod_rqlock);
13734bff34e3Sthurlow }
13744bff34e3Sthurlow 
137540c0e231SGordon Ross /*
137640c0e231SGordon Ross  * Ioctl functions called by the user-level I/O Daemon (IOD)
137740c0e231SGordon Ross  * to bring up and service a connection to some SMB server.
137840c0e231SGordon Ross  */
137940c0e231SGordon Ross 
1380adee6784SGordon Ross /*
1381adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_CONNECT
1382adee6784SGordon Ross  */
138340c0e231SGordon Ross int
1384adee6784SGordon Ross nsmb_iod_connect(struct smb_vc *vcp, cred_t *cr)
138540c0e231SGordon Ross {
138640c0e231SGordon Ross 	int err, val;
138740c0e231SGordon Ross 
138840c0e231SGordon Ross 	ASSERT(vcp->iod_thr == curthread);
138940c0e231SGordon Ross 
139040c0e231SGordon Ross 	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
139140c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: bad state %d", vcp->vc_state);
139240c0e231SGordon Ross 		return (EINVAL);
139340c0e231SGordon Ross 	}
139440c0e231SGordon Ross 
1395adee6784SGordon Ross 	/*
1396adee6784SGordon Ross 	 * Putting a TLI endpoint back in the right state for a new
1397adee6784SGordon Ross 	 * connection is a bit tricky.  In theory, this could be:
1398adee6784SGordon Ross 	 *	SMB_TRAN_DISCONNECT(vcp);
1399adee6784SGordon Ross 	 *	SMB_TRAN_UNBIND(vcp);
1400adee6784SGordon Ross 	 * but that method often results in TOUTSTATE errors.
1401adee6784SGordon Ross 	 * It's easier to just close it and open a new endpoint.
1402adee6784SGordon Ross 	 */
1403adee6784SGordon Ross 	SMB_VC_LOCK(vcp);
1404adee6784SGordon Ross 	if (vcp->vc_tdata)
1405adee6784SGordon Ross 		SMB_TRAN_DONE(vcp);
1406adee6784SGordon Ross 	err = SMB_TRAN_CREATE(vcp, cr);
1407adee6784SGordon Ross 	SMB_VC_UNLOCK(vcp);
1408adee6784SGordon Ross 	if (err != 0)
1409adee6784SGordon Ross 		return (err);
1410adee6784SGordon Ross 
141140c0e231SGordon Ross 	/*
141240c0e231SGordon Ross 	 * Set various options on this endpoint.
141340c0e231SGordon Ross 	 * Keep going in spite of errors.
141440c0e231SGordon Ross 	 */
141540c0e231SGordon Ross 	val = smb_tcpsndbuf;
141640c0e231SGordon Ross 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_SNDBUF, &val);
141740c0e231SGordon Ross 	if (err != 0) {
141840c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: setopt SNDBUF, err=%d", err);
141940c0e231SGordon Ross 	}
142040c0e231SGordon Ross 	val = smb_tcprcvbuf;
142140c0e231SGordon Ross 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_RCVBUF, &val);
142240c0e231SGordon Ross 	if (err != 0) {
142340c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: setopt RCVBUF, err=%d", err);
142440c0e231SGordon Ross 	}
142540c0e231SGordon Ross 	val = 1;
142640c0e231SGordon Ross 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_KEEPALIVE, &val);
142740c0e231SGordon Ross 	if (err != 0) {
142840c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: setopt KEEPALIVE, err=%d", err);
142940c0e231SGordon Ross 	}
143040c0e231SGordon Ross 	val = 1;
143140c0e231SGordon Ross 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_NODELAY, &val);
143240c0e231SGordon Ross 	if (err != 0) {
1433*640abd28SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: setopt TCP_NODELAY err=%d", err);
143440c0e231SGordon Ross 	}
143540c0e231SGordon Ross 	val = smb_connect_timeout * 1000;
143640c0e231SGordon Ross 	err = SMB_TRAN_SETPARAM(vcp, SMBTP_TCP_CON_TMO, &val);
143740c0e231SGordon Ross 	if (err != 0) {
1438*640abd28SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: setopt TCP con tmo err=%d", err);
143940c0e231SGordon Ross 	}
144040c0e231SGordon Ross 
144140c0e231SGordon Ross 	/*
144240c0e231SGordon Ross 	 * Bind and connect
144340c0e231SGordon Ross 	 */
144440c0e231SGordon Ross 	err = SMB_TRAN_BIND(vcp, NULL);
144540c0e231SGordon Ross 	if (err != 0) {
144640c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_connect: t_kbind: err=%d", err);
144740c0e231SGordon Ross 		/* Continue on and try connect. */
144840c0e231SGordon Ross 	}
144940c0e231SGordon Ross 	err = SMB_TRAN_CONNECT(vcp, &vcp->vc_srvaddr.sa);
145040c0e231SGordon Ross 	/*
145140c0e231SGordon Ross 	 * No cmn_err here, as connect failures are normal, i.e.
145240c0e231SGordon Ross 	 * when a server has multiple addresses and only some are
145340c0e231SGordon Ross 	 * routed for us. (libsmbfs tries them all)
145440c0e231SGordon Ross 	 */
145540c0e231SGordon Ross 	if (err == 0) {
145640c0e231SGordon Ross 		SMB_VC_LOCK(vcp);
145740c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_CONNECTED);
145840c0e231SGordon Ross 		SMB_VC_UNLOCK(vcp);
145940c0e231SGordon Ross 	} /* else stay in state reconnect */
146040c0e231SGordon Ross 
146140c0e231SGordon Ross 	return (err);
146240c0e231SGordon Ross }
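
/*
 * Editorial sketch (not built): the option-setting block above
 * repeats the same setparam-and-warn pattern five times, so it
 * could be written table-driven as below.  The SMBTP_* codes,
 * tunables, and SMB_TRAN_SETPARAM are the same names used above;
 * only the table form is new here.
 */
#if 0
static void
set_endpoint_opts(struct smb_vc *vcp)
{
	struct optent {
		int opt;
		int val;
		const char *name;
	} opts[] = {
		{ SMBTP_SNDBUF,		smb_tcpsndbuf,	"SNDBUF" },
		{ SMBTP_RCVBUF,		smb_tcprcvbuf,	"RCVBUF" },
		{ SMBTP_KEEPALIVE,	1,		"KEEPALIVE" },
		{ SMBTP_TCP_NODELAY,	1,		"TCP_NODELAY" },
		{ SMBTP_TCP_CON_TMO,	smb_connect_timeout * 1000,
					"TCP_CON_TMO" },
	};
	int i, n, err, val;

	n = sizeof (opts) / sizeof (opts[0]);
	for (i = 0; i < n; i++) {
		val = opts[i].val;
		err = SMB_TRAN_SETPARAM(vcp, opts[i].opt, &val);
		if (err != 0)
			cmn_err(CE_NOTE, "iod_connect: setopt %s, err=%d",
			    opts[i].name, err);
	}
}
#endif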
146340c0e231SGordon Ross 
146440c0e231SGordon Ross /*
1465adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_NEGOTIATE
146640c0e231SGordon Ross  * Do the whole SMB1/SMB2 negotiate
1467adee6784SGordon Ross  *
1468adee6784SGordon Ross  * This is where we send our first request to the server.
1469adee6784SGordon Ross  * If this is the first time we're talking to this server,
1470adee6784SGordon Ross  * (meaning not a reconnect) then we don't know whether
1471adee6784SGordon Ross  * the server supports SMB2, so we need to use the weird
1472adee6784SGordon Ross  * SMB1-to-SMB2 negotiation. That's where we send an SMB1
1473adee6784SGordon Ross  * negotiate including dialect "SMB 2.???" and if the
1474adee6784SGordon Ross  * server supports SMB2 we get an SMB2 reply -- Yes, an
1475adee6784SGordon Ross  * SMB2 reply to an SMB1 request.  A strange protocol...
1476adee6784SGordon Ross  *
1477adee6784SGordon Ross  * If on the other hand we already know the server supports
1478adee6784SGordon Ross  * SMB2 (because this is a reconnect) or if the client side
1479adee6784SGordon Ross  * has disabled SMB1 entirely, we'll skip the SMB1 part.
148040c0e231SGordon Ross  */
148140c0e231SGordon Ross int
148240c0e231SGordon Ross nsmb_iod_negotiate(struct smb_vc *vcp, cred_t *cr)
148340c0e231SGordon Ross {
148440c0e231SGordon Ross 	struct smb_sopt *sv = &vcp->vc_sopt;
148540c0e231SGordon Ross 	smb_cred_t scred;
148640c0e231SGordon Ross 	int err = 0;
148740c0e231SGordon Ross 
148840c0e231SGordon Ross 	ASSERT(vcp->iod_thr == curthread);
148940c0e231SGordon Ross 
1490adee6784SGordon Ross 	smb_credinit(&scred, cr);
1491adee6784SGordon Ross 
149240c0e231SGordon Ross 	if (vcp->vc_state != SMBIOD_ST_CONNECTED) {
149340c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_negotiate: bad state %d", vcp->vc_state);
1494adee6784SGordon Ross 		err = EINVAL;
1495adee6784SGordon Ross 		goto out;
1496adee6784SGordon Ross 	}
1497adee6784SGordon Ross 
1498adee6784SGordon Ross 	if (vcp->vc_maxver == 0 || vcp->vc_minver > vcp->vc_maxver) {
1499adee6784SGordon Ross 		err = EINVAL;
1500adee6784SGordon Ross 		goto out;
150140c0e231SGordon Ross 	}
150240c0e231SGordon Ross 
150340c0e231SGordon Ross 	/*
150440c0e231SGordon Ross 	 * (Re)init negotiated values
150540c0e231SGordon Ross 	 */
150640c0e231SGordon Ross 	bzero(sv, sizeof (*sv));
1507adee6784SGordon Ross 	vcp->vc2_next_message_id = 0;
1508adee6784SGordon Ross 	vcp->vc2_limit_message_id = 1;
1509adee6784SGordon Ross 	vcp->vc2_session_id = 0;
151040c0e231SGordon Ross 	vcp->vc_next_seq = 0;
151140c0e231SGordon Ross 
151240c0e231SGordon Ross 	/*
151340c0e231SGordon Ross 	 * If this was reconnect, get rid of the old MAC key
151440c0e231SGordon Ross 	 * and session key.
151540c0e231SGordon Ross 	 */
151640c0e231SGordon Ross 	SMB_VC_LOCK(vcp);
151740c0e231SGordon Ross 	if (vcp->vc_mackey != NULL) {
151840c0e231SGordon Ross 		kmem_free(vcp->vc_mackey, vcp->vc_mackeylen);
151940c0e231SGordon Ross 		vcp->vc_mackey = NULL;
152040c0e231SGordon Ross 		vcp->vc_mackeylen = 0;
152140c0e231SGordon Ross 	}
152240c0e231SGordon Ross 	if (vcp->vc_ssnkey != NULL) {
152340c0e231SGordon Ross 		kmem_free(vcp->vc_ssnkey, vcp->vc_ssnkeylen);
152440c0e231SGordon Ross 		vcp->vc_ssnkey = NULL;
152540c0e231SGordon Ross 		vcp->vc_ssnkeylen = 0;
152640c0e231SGordon Ross 	}
152740c0e231SGordon Ross 	SMB_VC_UNLOCK(vcp);
152840c0e231SGordon Ross 
1529adee6784SGordon Ross 	/*
1530adee6784SGordon Ross 	 * If this is not an SMB2 reconnect (SMBV_SMB2 not set),
1531adee6784SGordon Ross 	 * and if SMB1 is enabled, do SMB1 negotiate.  Then
1532adee6784SGordon Ross 	 * if either SMB1-to-SMB2 negotiate tells us we should
1533adee6784SGordon Ross 	 * switch to SMB2, or the local configuration has
1534adee6784SGordon Ross 	 * disabled SMB1, set the SMBV_SMB2 flag.
1535adee6784SGordon Ross 	 *
1536adee6784SGordon Ross 	 * Note that vc_maxver is handled in smb_smb_negotiate
1537adee6784SGordon Ross 	 * so we never get sv_proto == SMB_DIALECT_SMB2_FF when
1538adee6784SGordon Ross 	 * the local configuration disables SMB2, and therefore
1539adee6784SGordon Ross 	 * we won't set the SMBV_SMB2 flag.
1540adee6784SGordon Ross 	 */
1541adee6784SGordon Ross 	if ((vcp->vc_flags & SMBV_SMB2) == 0) {
1542adee6784SGordon Ross 		if (vcp->vc_minver < SMB2_DIALECT_BASE) {
1543adee6784SGordon Ross 			/*
1544adee6784SGordon Ross 			 * SMB1 is enabled
1545adee6784SGordon Ross 			 */
1546adee6784SGordon Ross 			err = smb_smb_negotiate(vcp, &scred);
1547adee6784SGordon Ross 			if (err != 0)
1548adee6784SGordon Ross 				goto out;
1549adee6784SGordon Ross 		}
1550adee6784SGordon Ross 		/*
1551adee6784SGordon Ross 		 * If SMB1-to-SMB2 negotiate told us we should
1552adee6784SGordon Ross 		 * switch to SMB2, or if the local configuration
1553adee6784SGordon Ross 		 * disables SMB1, set the SMB2 flag.
1554adee6784SGordon Ross 		 */
1555adee6784SGordon Ross 		if (sv->sv_proto == SMB_DIALECT_SMB2_FF ||
1556adee6784SGordon Ross 		    vcp->vc_minver >= SMB2_DIALECT_BASE) {
1557adee6784SGordon Ross 			/*
1558adee6784SGordon Ross 			 * Switch this VC to SMB2.
1559adee6784SGordon Ross 			 */
1560adee6784SGordon Ross 			SMB_VC_LOCK(vcp);
1561adee6784SGordon Ross 			vcp->vc_flags |= SMBV_SMB2;
1562adee6784SGordon Ross 			SMB_VC_UNLOCK(vcp);
1563adee6784SGordon Ross 		}
1564adee6784SGordon Ross 	}
1565adee6784SGordon Ross 
1566adee6784SGordon Ross 	/*
1567adee6784SGordon Ross 	 * If this is an SMB2 reconnect (SMBV_SMB2 was set before this
1568adee6784SGordon Ross 	 * function was called), or SMB1-to-SMB2 negotiate indicated
1569adee6784SGordon Ross 	 * we should switch to SMB2, or we have SMB1 disabled (both
1570adee6784SGordon Ross 	 * cases set SMBV_SMB2 above), then do SMB2 negotiate.
1571adee6784SGordon Ross 	 */
1572adee6784SGordon Ross 	if ((vcp->vc_flags & SMBV_SMB2) != 0) {
1573adee6784SGordon Ross 		err = smb2_smb_negotiate(vcp, &scred);
1574adee6784SGordon Ross 	}
157540c0e231SGordon Ross 
1576adee6784SGordon Ross out:
157740c0e231SGordon Ross 	if (err == 0) {
157840c0e231SGordon Ross 		SMB_VC_LOCK(vcp);
157940c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_NEGOTIATED);
158040c0e231SGordon Ross 		SMB_VC_UNLOCK(vcp);
158140c0e231SGordon Ross 	}
158240c0e231SGordon Ross 	/*
158340c0e231SGordon Ross 	 * (else) leave state as it was.
1584adee6784SGordon Ross 	 * User-level will either close this handle (if connecting
1585adee6784SGordon Ross 	 * for the first time) or call rcfail and then try again.
158640c0e231SGordon Ross 	 */
158740c0e231SGordon Ross 
1588adee6784SGordon Ross 	smb_credrele(&scred);
1589adee6784SGordon Ross 
159040c0e231SGordon Ross 	return (err);
159140c0e231SGordon Ross }
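
/*
 * Editorial sketch (not built): the SMB1/SMB2 decision made above,
 * restated as a single predicate over the same fields the function
 * reads.  "want_smb2" is a hypothetical helper name.
 */
#if 0
static boolean_t
want_smb2(struct smb_vc *vcp)
{
	/* Already committed to SMB2, e.g. an SMB2 reconnect. */
	if (vcp->vc_flags & SMBV_SMB2)
		return (B_TRUE);

	/* SMB1 disabled by the local configuration. */
	if (vcp->vc_minver >= SMB2_DIALECT_BASE)
		return (B_TRUE);

	/* SMB1-to-SMB2 negotiate told us to switch. */
	if (vcp->vc_sopt.sv_proto == SMB_DIALECT_SMB2_FF)
		return (B_TRUE);

	return (B_FALSE);
}
#endif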
159240c0e231SGordon Ross 
159340c0e231SGordon Ross /*
1594adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_SSNSETUP
1595adee6784SGordon Ross  * Do either SMB1 or SMB2 session setup (one call/reply)
159640c0e231SGordon Ross  */
159740c0e231SGordon Ross int
159840c0e231SGordon Ross nsmb_iod_ssnsetup(struct smb_vc *vcp, cred_t *cr)
159940c0e231SGordon Ross {
160040c0e231SGordon Ross 	smb_cred_t scred;
160140c0e231SGordon Ross 	int err;
160240c0e231SGordon Ross 
160340c0e231SGordon Ross 	ASSERT(vcp->iod_thr == curthread);
160440c0e231SGordon Ross 
160540c0e231SGordon Ross 	switch (vcp->vc_state) {
160640c0e231SGordon Ross 	case SMBIOD_ST_NEGOTIATED:
160740c0e231SGordon Ross 	case SMBIOD_ST_AUTHCONT:
160840c0e231SGordon Ross 		break;
160940c0e231SGordon Ross 	default:
161040c0e231SGordon Ross 		return (EINVAL);
161140c0e231SGordon Ross 	}
161240c0e231SGordon Ross 
161340c0e231SGordon Ross 	smb_credinit(&scred, cr);
1614adee6784SGordon Ross 	if (vcp->vc_flags & SMBV_SMB2)
1615adee6784SGordon Ross 		err = smb2_smb_ssnsetup(vcp, &scred);
1616adee6784SGordon Ross 	else
1617adee6784SGordon Ross 		err = smb_smb_ssnsetup(vcp, &scred);
161840c0e231SGordon Ross 	smb_credrele(&scred);
161940c0e231SGordon Ross 
162040c0e231SGordon Ross 	SMB_VC_LOCK(vcp);
162140c0e231SGordon Ross 	switch (err) {
162240c0e231SGordon Ross 	case 0:
162340c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHOK);
162440c0e231SGordon Ross 		break;
162540c0e231SGordon Ross 	case EINPROGRESS:	/* MORE_PROCESSING_REQUIRED */
162640c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHCONT);
162740c0e231SGordon Ross 		break;
162840c0e231SGordon Ross 	default:
162940c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_AUTHFAIL);
163040c0e231SGordon Ross 		break;
163140c0e231SGordon Ross 	}
163240c0e231SGordon Ross 	SMB_VC_UNLOCK(vcp);
163340c0e231SGordon Ross 
163440c0e231SGordon Ross 	return (err);
163540c0e231SGordon Ross }
163640c0e231SGordon Ross 
1637adee6784SGordon Ross static int
1638adee6784SGordon Ross smb_iod_logoff(struct smb_vc *vcp, cred_t *cr)
1639adee6784SGordon Ross {
1640adee6784SGordon Ross 	smb_cred_t scred;
1641adee6784SGordon Ross 	int err;
1642adee6784SGordon Ross 
1643adee6784SGordon Ross 	ASSERT(vcp->iod_thr == curthread);
1644adee6784SGordon Ross 
1645adee6784SGordon Ross 	smb_credinit(&scred, cr);
1646adee6784SGordon Ross 	if (vcp->vc_flags & SMBV_SMB2)
1647adee6784SGordon Ross 		err = smb2_smb_logoff(vcp, &scred);
1648adee6784SGordon Ross 	else
1649adee6784SGordon Ross 		err = smb_smb_logoff(vcp, &scred);
1650adee6784SGordon Ross 	smb_credrele(&scred);
1651adee6784SGordon Ross 
1652adee6784SGordon Ross 	return (err);
1653adee6784SGordon Ross }
1654adee6784SGordon Ross 
1655adee6784SGordon Ross /*
1656adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_WORK
1657adee6784SGordon Ross  *
1658adee6784SGordon Ross  * The smbiod agent calls this after authentication to become
1659adee6784SGordon Ross  * the reader for this session, so long as that's possible.
1660adee6784SGordon Ross  * This should only return non-zero if we want that agent to
1661adee6784SGordon Ross  * give up on this VC permanently.
1662adee6784SGordon Ross  */
166340c0e231SGordon Ross /* ARGSUSED */
1664613a2f6bSGordon Ross int
166540c0e231SGordon Ross smb_iod_vc_work(struct smb_vc *vcp, int flags, cred_t *cr)
16664bff34e3Sthurlow {
166740c0e231SGordon Ross 	smbioc_ssn_work_t *wk = &vcp->vc_work;
1668613a2f6bSGordon Ross 	int err = 0;
16694bff34e3Sthurlow 
1670613a2f6bSGordon Ross 	/*
1671613a2f6bSGordon Ross 	 * This is called by the one-and-only
1672613a2f6bSGordon Ross 	 * IOD thread for this VC.
1673613a2f6bSGordon Ross 	 */
1674613a2f6bSGordon Ross 	ASSERT(vcp->iod_thr == curthread);
16754bff34e3Sthurlow 
1676613a2f6bSGordon Ross 	/*
167740c0e231SGordon Ross 	 * Should be in state...
167840c0e231SGordon Ross 	 */
167940c0e231SGordon Ross 	if (vcp->vc_state != SMBIOD_ST_AUTHOK) {
168040c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_vc_work: bad state %d", vcp->vc_state);
168140c0e231SGordon Ross 		return (EINVAL);
168240c0e231SGordon Ross 	}
168340c0e231SGordon Ross 
168440c0e231SGordon Ross 	/*
168540c0e231SGordon Ross 	 * Update the session key and initialize SMB signing.
168640c0e231SGordon Ross 	 *
168740c0e231SGordon Ross 	 * This implementation does not use multiple SMB sessions per
168840c0e231SGordon Ross 	 * TCP connection (where only the first session key is used)
168940c0e231SGordon Ross 	 * so we always have a new session key here.  Sanity check the
169040c0e231SGordon Ross 	 * length from user space.  Normally 16 or 32.
1691613a2f6bSGordon Ross 	 */
169240c0e231SGordon Ross 	if (wk->wk_u_ssnkey_len > 1024) {
169340c0e231SGordon Ross 		cmn_err(CE_NOTE, "iod_vc_work: ssn key too long");
169440c0e231SGordon Ross 		return (EINVAL);
1695613a2f6bSGordon Ross 	}
169640c0e231SGordon Ross 
169740c0e231SGordon Ross 	ASSERT(vcp->vc_ssnkey == NULL);
169840c0e231SGordon Ross 	SMB_VC_LOCK(vcp);
169940c0e231SGordon Ross 	if (wk->wk_u_ssnkey_len != 0 &&
170040c0e231SGordon Ross 	    wk->wk_u_ssnkey_buf.lp_ptr != NULL) {
170140c0e231SGordon Ross 		vcp->vc_ssnkeylen = wk->wk_u_ssnkey_len;
170240c0e231SGordon Ross 		vcp->vc_ssnkey = kmem_alloc(vcp->vc_ssnkeylen, KM_SLEEP);
170340c0e231SGordon Ross 		if (ddi_copyin(wk->wk_u_ssnkey_buf.lp_ptr,
170440c0e231SGordon Ross 		    vcp->vc_ssnkey, vcp->vc_ssnkeylen, flags) != 0) {
170540c0e231SGordon Ross 			err = EFAULT;
170640c0e231SGordon Ross 		}
170740c0e231SGordon Ross 	}
170840c0e231SGordon Ross 	SMB_VC_UNLOCK(vcp);
170940c0e231SGordon Ross 	if (err)
171040c0e231SGordon Ross 		return (err);
17114bff34e3Sthurlow 
17124bff34e3Sthurlow 	/*
171340c0e231SGordon Ross 	 * If we have a session key, derive the MAC key for SMB signing.
171440c0e231SGordon Ross 	 * If this was a NULL session, we might have no session key.
171540c0e231SGordon Ross 	 */
171640c0e231SGordon Ross 	ASSERT(vcp->vc_mackey == NULL);
171740c0e231SGordon Ross 	if (vcp->vc_ssnkey != NULL) {
1718adee6784SGordon Ross 		if (vcp->vc_flags & SMBV_SMB2)
1719adee6784SGordon Ross 			err = smb2_sign_init(vcp);
1720adee6784SGordon Ross 		else
1721adee6784SGordon Ross 			err = smb_sign_init(vcp);
172240c0e231SGordon Ross 		if (err != 0)
172340c0e231SGordon Ross 			return (err);
172440c0e231SGordon Ross 	}
172540c0e231SGordon Ross 
172640c0e231SGordon Ross 	/*
172740c0e231SGordon Ross 	 * Tell any enqueued requests they can start.
17284bff34e3Sthurlow 	 */
17294bff34e3Sthurlow 	SMB_VC_LOCK(vcp);
1730613a2f6bSGordon Ross 	vcp->vc_genid++;	/* possibly new connection */
1731613a2f6bSGordon Ross 	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
1732613a2f6bSGordon Ross 	cv_broadcast(&vcp->vc_statechg);
17334bff34e3Sthurlow 	SMB_VC_UNLOCK(vcp);
17344bff34e3Sthurlow 
17354bff34e3Sthurlow 	/*
1736613a2f6bSGordon Ross 	 * The above cv_broadcast should be sufficient to
1737613a2f6bSGordon Ross 	 * get requests going again.
1738613a2f6bSGordon Ross 	 *
1739613a2f6bSGordon Ross 	 * If we have a callback function, run it.
1740613a2f6bSGordon Ross 	 * Was: smb_iod_notify_connected()
17414bff34e3Sthurlow 	 */
1742613a2f6bSGordon Ross 	if (fscb && fscb->fscb_connect)
1743613a2f6bSGordon Ross 		smb_vc_walkshares(vcp, fscb->fscb_connect);
17444bff34e3Sthurlow 
17454bff34e3Sthurlow 	/*
1746adee6784SGordon Ross 	 * Run the "reader" loop.  An error return here is normal
1747adee6784SGordon Ross 	 * (i.e. when we need to reconnect) so ignore errors.
1748adee6784SGordon Ross 	 * Note: This call updates the vc_state.
17494bff34e3Sthurlow 	 */
1750adee6784SGordon Ross 	(void) smb_iod_recvall(vcp, B_FALSE);
17514bff34e3Sthurlow 
17524bff34e3Sthurlow 	/*
1753613a2f6bSGordon Ross 	 * The reader loop returned, so we must have a
1754613a2f6bSGordon Ross 	 * new state.  (disconnected or reconnecting)
1755613a2f6bSGordon Ross 	 *
1756613a2f6bSGordon Ross 	 * Notify shares of the disconnect.
1757613a2f6bSGordon Ross 	 * Was: smb_iod_notify_disconnect()
17584bff34e3Sthurlow 	 */
1759613a2f6bSGordon Ross 	smb_vc_walkshares(vcp, smb_iod_share_disconnected);
17604bff34e3Sthurlow 
17614bff34e3Sthurlow 	/*
1762613a2f6bSGordon Ross 	 * The reader loop function returns only when
1763613a2f6bSGordon Ross 	 * there's been an error on the connection, or
1764613a2f6bSGordon Ross 	 * this VC has no more references.  It also
1765613a2f6bSGordon Ross 	 * updates the state before it returns.
1766613a2f6bSGordon Ross 	 *
1767613a2f6bSGordon Ross 	 * Tell any requests to give up or restart.
17684bff34e3Sthurlow 	 */
1769613a2f6bSGordon Ross 	smb_iod_invrq(vcp);
1770613a2f6bSGordon Ross 
1771613a2f6bSGordon Ross 	return (err);
1772613a2f6bSGordon Ross }
1773613a2f6bSGordon Ross 
1774613a2f6bSGordon Ross /*
1775adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_IDLE
1776adee6784SGordon Ross  *
1777adee6784SGordon Ross  * Wait around for someone to ask to use this VC again after the
1778adee6784SGordon Ross  * TCP session has closed.  When one of the connected trees adds a
1779adee6784SGordon Ross  * request, smb_iod_reconnect will set vc_state to RECONNECT and
1780adee6784SGordon Ross  * wake this cv_wait.  When a VC ref. goes away in smb_vc_rele,
1781adee6784SGordon Ross  * that also signals this wait so we can re-check whether we
1782adee6784SGordon Ross  * now hold the last ref. on this VC (and can destroy it).
1783613a2f6bSGordon Ross  */
1784613a2f6bSGordon Ross int
1785613a2f6bSGordon Ross smb_iod_vc_idle(struct smb_vc *vcp)
1786613a2f6bSGordon Ross {
1787613a2f6bSGordon Ross 	int err = 0;
1788adee6784SGordon Ross 	boolean_t destroy = B_FALSE;
17894bff34e3Sthurlow 
17904bff34e3Sthurlow 	/*
1791613a2f6bSGordon Ross 	 * This is called by the one-and-only
1792613a2f6bSGordon Ross 	 * IOD thread for this VC.
17934bff34e3Sthurlow 	 */
1794613a2f6bSGordon Ross 	ASSERT(vcp->iod_thr == curthread);
1795613a2f6bSGordon Ross 
1796adee6784SGordon Ross 	/*
1797adee6784SGordon Ross 	 * Should be in state...
1798adee6784SGordon Ross 	 */
1799adee6784SGordon Ross 	if (vcp->vc_state != SMBIOD_ST_IDLE &&
1800adee6784SGordon Ross 	    vcp->vc_state != SMBIOD_ST_RECONNECT) {
1801adee6784SGordon Ross 		cmn_err(CE_NOTE, "iod_vc_idle: bad state %d", vcp->vc_state);
1802adee6784SGordon Ross 		return (EINVAL);
1803adee6784SGordon Ross 	}
1804adee6784SGordon Ross 
1805613a2f6bSGordon Ross 	SMB_VC_LOCK(vcp);
1806adee6784SGordon Ross 
1807adee6784SGordon Ross 	while (vcp->vc_state == SMBIOD_ST_IDLE &&
1808adee6784SGordon Ross 	    vcp->vc_co.co_usecount > 1) {
1809adee6784SGordon Ross 		if (cv_wait_sig(&vcp->iod_idle, &vcp->vc_lock) == 0) {
1810613a2f6bSGordon Ross 			err = EINTR;
1811613a2f6bSGordon Ross 			break;
1812613a2f6bSGordon Ross 		}
1813613a2f6bSGordon Ross 	}
1814adee6784SGordon Ross 	if (vcp->vc_state == SMBIOD_ST_IDLE &&
1815adee6784SGordon Ross 	    vcp->vc_co.co_usecount == 1) {
1816adee6784SGordon Ross 		/*
1817adee6784SGordon Ross 		 * We were woken because we now have the last ref.
1818adee6784SGordon Ross 		 * Arrange for this VC to be destroyed now.
1819adee6784SGordon Ross 		 * Set the "GONE" flag while holding the lock,
1820adee6784SGordon Ross 		 * to prevent a race with new references.
1821adee6784SGordon Ross 		 * The destroy happens after unlock.
1822adee6784SGordon Ross 		 */
1823adee6784SGordon Ross 		vcp->vc_flags |= SMBV_GONE;
1824adee6784SGordon Ross 		destroy = B_TRUE;
1825adee6784SGordon Ross 	}
1826adee6784SGordon Ross 
1827613a2f6bSGordon Ross 	SMB_VC_UNLOCK(vcp);
18284bff34e3Sthurlow 
1829adee6784SGordon Ross 	if (destroy) {
1830adee6784SGordon Ross 		/* This sets vc_state = DEAD */
1831adee6784SGordon Ross 		smb_iod_disconnect(vcp);
1832adee6784SGordon Ross 	}
1833adee6784SGordon Ross 
1834613a2f6bSGordon Ross 	return (err);
18354bff34e3Sthurlow }
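
/*
 * Editorial sketch (not built): the last-reference teardown pattern
 * used above, in generic form.  Mark the object "gone" while holding
 * its lock so no new reference can race in, then do the expensive
 * teardown after dropping the lock.  The obj_t type and helpers are
 * hypothetical, used only to show the shape of the pattern.
 */
#if 0
static void
obj_rele(obj_t *op)
{
	boolean_t destroy = B_FALSE;

	mutex_enter(&op->o_lock);
	if (--op->o_refcnt == 0) {
		op->o_flags |= OBJ_GONE;	/* block new lookups */
		destroy = B_TRUE;
	}
	mutex_exit(&op->o_lock);

	if (destroy)
		obj_destroy(op);		/* heavy work, unlocked */
}
#endif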
18364bff34e3Sthurlow 
18374bff34e3Sthurlow /*
1838adee6784SGordon Ross  * Handle ioctl SMBIOC_IOD_RCFAIL
1839adee6784SGordon Ross  *
1840613a2f6bSGordon Ross  * After a failed reconnect attempt, smbiod will
1841613a2f6bSGordon Ross  * call this to make current requests error out.
18424bff34e3Sthurlow  */
18434bff34e3Sthurlow int
1844613a2f6bSGordon Ross smb_iod_vc_rcfail(struct smb_vc *vcp)
18454bff34e3Sthurlow {
1846d3d50737SRafael Vanoni 	clock_t tr;
1847613a2f6bSGordon Ross 	int err = 0;
18484bff34e3Sthurlow 
18494bff34e3Sthurlow 	/*
1850613a2f6bSGordon Ross 	 * This is called by the one-and-only
1851613a2f6bSGordon Ross 	 * IOD thread for this VC.
18524bff34e3Sthurlow 	 */
1853613a2f6bSGordon Ross 	ASSERT(vcp->iod_thr == curthread);
18544bff34e3Sthurlow 	SMB_VC_LOCK(vcp);
18554bff34e3Sthurlow 
1856613a2f6bSGordon Ross 	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
1857613a2f6bSGordon Ross 	cv_broadcast(&vcp->vc_statechg);
18584bff34e3Sthurlow 
18594bff34e3Sthurlow 	/*
1860613a2f6bSGordon Ross 	 * Short wait here for two reasons:
1861613a2f6bSGordon Ross 	 * (1) Give requests a chance to error out.
1862613a2f6bSGordon Ross 	 * (2) Prevent immediate retry.
18634bff34e3Sthurlow 	 */
1864d3d50737SRafael Vanoni 	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
1865d3d50737SRafael Vanoni 	    SEC_TO_TICK(5), TR_CLOCK_TICK);
1866613a2f6bSGordon Ross 	if (tr == 0)
1867613a2f6bSGordon Ross 		err = EINTR;
18684bff34e3Sthurlow 
186940c0e231SGordon Ross 	/*
1870adee6784SGordon Ross 	 * Normally we'll switch to state IDLE here.  However,
1871adee6784SGordon Ross 	 * if something called smb_iod_reconnect() while we were
1872adee6784SGordon Ross 	 * waiting above, we'll already be in state RECONNECT.
1873adee6784SGordon Ross 	 * In that case, keep state RECONNECT, so we essentially
1874adee6784SGordon Ross 	 * skip transition through state IDLE that would normally
1875adee6784SGordon Ross 	 * happen next.
187640c0e231SGordon Ross 	 */
1877adee6784SGordon Ross 	if (vcp->vc_state != SMBIOD_ST_RECONNECT) {
187840c0e231SGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
1879adee6784SGordon Ross 		cv_broadcast(&vcp->vc_statechg);
1880adee6784SGordon Ross 	}
18814bff34e3Sthurlow 
18824bff34e3Sthurlow 	SMB_VC_UNLOCK(vcp);
18834bff34e3Sthurlow 
1884613a2f6bSGordon Ross 	return (err);
18854bff34e3Sthurlow }
18864bff34e3Sthurlow 
18874bff34e3Sthurlow /*
1888613a2f6bSGordon Ross  * Ask the IOD to reconnect (if not already underway)
1889613a2f6bSGordon Ross  * then wait for the reconnect to finish.
18904bff34e3Sthurlow  */
18914bff34e3Sthurlow int
1892613a2f6bSGordon Ross smb_iod_reconnect(struct smb_vc *vcp)
18934bff34e3Sthurlow {
1894613a2f6bSGordon Ross 	int err = 0, rv;
18954bff34e3Sthurlow 
18964bff34e3Sthurlow 	SMB_VC_LOCK(vcp);
1897613a2f6bSGordon Ross again:
1898613a2f6bSGordon Ross 	switch (vcp->vc_state) {
1899613a2f6bSGordon Ross 
1900613a2f6bSGordon Ross 	case SMBIOD_ST_IDLE:
1901adee6784SGordon Ross 		/* Tell the IOD thread it's no longer IDLE. */
1902613a2f6bSGordon Ross 		smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT);
1903613a2f6bSGordon Ross 		cv_signal(&vcp->iod_idle);
1904613a2f6bSGordon Ross 		/* FALLTHROUGH */
1905613a2f6bSGordon Ross 
1906613a2f6bSGordon Ross 	case SMBIOD_ST_RECONNECT:
190740c0e231SGordon Ross 	case SMBIOD_ST_CONNECTED:
190840c0e231SGordon Ross 	case SMBIOD_ST_NEGOTIATED:
190940c0e231SGordon Ross 	case SMBIOD_ST_AUTHCONT:
191040c0e231SGordon Ross 	case SMBIOD_ST_AUTHOK:
1911adee6784SGordon Ross 		/* Wait for the VC state to become ACTIVE. */
1912613a2f6bSGordon Ross 		rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock);
1913613a2f6bSGordon Ross 		if (rv == 0) {
1914613a2f6bSGordon Ross 			err = EINTR;
1915613a2f6bSGordon Ross 			break;
19164bff34e3Sthurlow 		}
1917613a2f6bSGordon Ross 		goto again;
1918613a2f6bSGordon Ross 
1919613a2f6bSGordon Ross 	case SMBIOD_ST_VCACTIVE:
1920613a2f6bSGordon Ross 		err = 0; /* success! */
1921613a2f6bSGordon Ross 		break;
1922613a2f6bSGordon Ross 
192340c0e231SGordon Ross 	case SMBIOD_ST_AUTHFAIL:
1924613a2f6bSGordon Ross 	case SMBIOD_ST_RCFAILED:
1925613a2f6bSGordon Ross 	case SMBIOD_ST_DEAD:
1926613a2f6bSGordon Ross 	default:
1927613a2f6bSGordon Ross 		err = ENOTCONN;
1928613a2f6bSGordon Ross 		break;
19294bff34e3Sthurlow 	}
19304bff34e3Sthurlow 
1931613a2f6bSGordon Ross 	SMB_VC_UNLOCK(vcp);
1932613a2f6bSGordon Ross 	return (err);
19334bff34e3Sthurlow }
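
/*
 * Editorial sketch (not built): how the user-level smbiod might
 * drive the SMBIOC_IOD_* handlers above for one session.  The
 * ioctl names come from the comments above; the argument forms
 * and the nsmb_ioctl wrapper are assumptions, not the actual
 * libsmbfs interface.  A real implementation would also loop on
 * SSNSETUP while it returns EINPROGRESS (state AUTHCONT).
 */
#if 0
static void
iod_session_loop(int dev_fd)
{
	for (;;) {
		/* IDLE: block until someone wants this VC again. */
		if (nsmb_ioctl(dev_fd, SMBIOC_IOD_IDLE, NULL) != 0)
			break;		/* last reference went away */

		/* RECONNECT -> CONNECTED -> NEGOTIATED -> AUTHOK */
		if (nsmb_ioctl(dev_fd, SMBIOC_IOD_CONNECT, NULL) != 0 ||
		    nsmb_ioctl(dev_fd, SMBIOC_IOD_NEGOTIATE, NULL) != 0 ||
		    nsmb_ioctl(dev_fd, SMBIOC_IOD_SSNSETUP, NULL) != 0) {
			/* Error out waiters; rate-limit the retry. */
			(void) nsmb_ioctl(dev_fd, SMBIOC_IOD_RCFAIL, NULL);
			continue;
		}

		/* VCACTIVE: become the reader until disconnect. */
		(void) nsmb_ioctl(dev_fd, SMBIOC_IOD_WORK, NULL);
	}
}
#endif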
1934