xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c (revision 9c9af2590af49bb395bc8d2eace0f2d4ea16d165)
1 /*
2  * Copyright (c) 2000-2001, Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kmem.h>
43 #include <sys/proc.h>
44 #include <sys/lock.h>
45 #include <sys/socket.h>
46 #include <sys/mount.h>
47 #include <sys/cmn_err.h>
48 #include <sys/sdt.h>
49 
50 #include <netsmb/smb_osdep.h>
51 
52 #include <netsmb/smb.h>
53 #include <netsmb/smb_conn.h>
54 #include <netsmb/smb_subr.h>
55 #include <netsmb/smb_tran.h>
56 #include <netsmb/smb_rq.h>
57 
58 static int  smb_rq_reply(struct smb_rq *rqp);
59 static int  smb_rq_enqueue(struct smb_rq *rqp);
60 static int  smb_rq_getenv(struct smb_connobj *layer,
61 		struct smb_vc **vcpp, struct smb_share **sspp);
62 static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
63 static int  smb_t2_reply(struct smb_t2rq *t2p);
64 static int  smb_nt_reply(struct smb_ntrq *ntp);
65 
66 
67 
68 int
69 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
70 	struct smb_rq **rqpp)
71 {
72 	struct smb_rq *rqp;
73 	int error;
74 
75 	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
76 	if (rqp == NULL)
77 		return (ENOMEM);
78 	error = smb_rq_init(rqp, layer, cmd, scred);
79 	if (error) {
80 		smb_rq_done(rqp);
81 		return (error);
82 	}
83 	rqp->sr_flags |= SMBR_ALLOCED;
84 	*rqpp = rqp;
85 	return (0);
86 }
87 
88 
/*
 * Initialize a (caller-provided) request: resolve the VC and
 * share from the passed connection object, assign a fresh MID,
 * and marshall the SMB header via smb_rq_new().  On success a
 * hold is taken on the VC (tracked by SMBR_VCREF and released
 * in smb_rq_done()).
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL,  MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	/* Find the VC (and share, if any) this request belongs to. */
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* XXX no ref hold */
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	error = smb_rq_new(rqp, cmd);
	if (!error) {
		/* Header built OK; keep the VC around until done. */
		rqp->sr_flags |= SMBR_VCREF;
		smb_vc_hold(rqp->sr_vc);
	}
	return (error);
}
113 
/*
 * (Re)build the SMB header for the request and reset its reply
 * state.  Any previously marshalled request or reply data is
 * released first, so this is also used to recycle a request for
 * TRANSACTION secondary sends (see smb_t2_request_int).
 */
static int
smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	static char tzero[12];	/* 12 zero bytes for the header fields */
	caddr_t ptr;
	pid_t   pid;

	ASSERT(rqp != NULL);
	ASSERT(rqp->sr_cred != NULL);
	pid = rqp->sr_cred->vc_pid;
	rqp->sr_sendcnt = 0;
	rqp->sr_cmd = cmd;
	/* Discard any stale marshalled data, then rebuild. */
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return (error);
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);	/* status */
	rqp->sr_rqflags = vcp->vc_hflags;
	mb_put_uint8(mbp, rqp->sr_rqflags);
	rqp->sr_rqflags2 = vcp->vc_hflags2;
	mb_put_uint16le(mbp, rqp->sr_rqflags2);
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
	/*
	 * Reserve the TID field; sr_rqtid points into the mbchain
	 * so the value can be patched in later — presumably at
	 * send time by the iod (TODO: confirm).
	 */
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rqtid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, (u_int16_t)(pid));
	/* Reserve the UID field, same patch-later scheme as TID. */
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rquid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, rqp->sr_mid);
	return (0);
}
156 
/*
 * Tear down a request: drop the VC hold taken by smb_rq_init(),
 * release marshalled request/reply data and the lock/cv, and
 * free the request itself iff it came from smb_rq_alloc()
 * (indicated by SMBR_ALLOCED).
 */
void
smb_rq_done(struct smb_rq *rqp)
{
	/* No locks.  Last ref. here. */
	if (rqp->sr_flags & SMBR_VCREF) {
		rqp->sr_flags &= ~SMBR_VCREF;
		smb_vc_rele(rqp->sr_vc);
	}
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	mutex_destroy(&rqp->sr_lock);
	cv_destroy(&rqp->sr_cond);
	if (rqp->sr_flags & SMBR_ALLOCED)
		kmem_free(rqp, sizeof (*rqp));
}
172 
173 /*
174  * Simple request-reply exchange
175  */
176 int
177 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
178 {
179 	int error = EINVAL;
180 
181 	for (; ; ) {
182 		/*
183 		 * Don't send any new requests if force unmount is underway.
184 		 * This check was moved into smb_rq_enqueue.
185 		 */
186 		rqp->sr_flags &= ~SMBR_RESTART;
187 		rqp->sr_timo = timeout;	/* in seconds */
188 		rqp->sr_state = SMBRQ_NOTSENT;
189 		error = smb_rq_enqueue(rqp);
190 		if (error) {
191 			break;
192 		}
193 		error = smb_rq_reply(rqp);
194 		if (!error)
195 			break;
196 		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
197 		    SMBR_RESTART)
198 			break;
199 		if (rqp->sr_rexmit <= 0)
200 			break;
201 		SMBRQ_LOCK(rqp);
202 		if (rqp->sr_share && rqp->sr_share->ss_mount) {
203 			cv_timedwait(&rqp->sr_cond, &(rqp)->sr_lock,
204 			    lbolt + (hz * SMB_RCNDELAY));
205 
206 		} else {
207 			delay(lbolt + (hz * SMB_RCNDELAY));
208 		}
209 		SMBRQ_UNLOCK(rqp);
210 		rqp->sr_rexmit--;
211 #ifdef XXX
212 		timeout *= 2;
213 #endif
214 	}
215 	return (error);
216 }
217 
218 
219 int
220 smb_rq_simple(struct smb_rq *rqp)
221 {
222 	return (smb_rq_simple_timed(rqp, smb_timo_default));
223 }
224 
/*
 * Hand the request to the iod for transmission, after first
 * validating connection state and (re)connecting the share if
 * necessary.  "Internal" requests bypass the state checks, as
 * they are the ones performing the state changes themselves.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	/*
	 * Unfortunate special case needed for
	 * tree disconnect, which needs sr_share
	 * but should skip the reconnect check.
	 */
	if (rqp->sr_cmd == SMB_COM_TREE_DISCONNECT)
		ssp = NULL;

	/*
	 * If this is an "internal" request, bypass any
	 * wait for connection state changes, etc.
	 * This request is making those changes.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT(ssp == NULL);
		goto just_doit;
	}

	/*
	 * Wait for VC reconnect to finish...
	 * XXX: Deal with reconnect later.
	 * Just bail out for now.
	 *
	 * MacOS might check vfs_isforce() here.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * If this request has a "share" object:
	 * 1: Deny access if share is _GONE (unmounted)
	 * 2: Wait for state changes in that object,
	 *    Initiate share (re)connect if needed.
	 * XXX: Not really doing 2 yet.
	 */
	if (ssp) {
		if (ssp->ss_flags & SMBS_GONE)
			return (ENOTCONN);
		SMB_SS_LOCK(ssp);
		if (!smb_share_valid(ssp)) {
			error = smb_share_tcon(ssp);
		}
		SMB_SS_UNLOCK(ssp);
	}

	/* Note: internal requests jump straight to this label. */
	if (!error) {
	just_doit:
		error = smb_iod_addrq(rqp);
	}

	return (error);
}
286 
287 /*
288  * Mark location of the word count, which is filled in later by
289  * smb_rw_wend().  Also initialize the counter that it uses
290  * to figure out what value to fill in.
291  *
292  * Note that the word count happens to be 8-bit.
293  */
294 void
295 smb_rq_wstart(struct smb_rq *rqp)
296 {
297 	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
298 	rqp->sr_rq.mb_count = 0;
299 }
300 
301 void
302 smb_rq_wend(struct smb_rq *rqp)
303 {
304 	uint_t wcnt;
305 
306 	if (rqp->sr_wcount == NULL) {
307 		SMBSDEBUG("no wcount\n");
308 		return;
309 	}
310 	wcnt = rqp->sr_rq.mb_count;
311 	if (wcnt > 0x1ff)
312 		SMBSDEBUG("word count too large (%d)\n", wcnt);
313 	if (wcnt & 1)
314 		SMBSDEBUG("odd word count\n");
315 	/* Fill in the word count (8-bits) */
316 	*rqp->sr_wcount = (wcnt >> 1);
317 }
318 
319 /*
320  * Mark location of the byte count, which is filled in later by
321  * smb_rw_bend().  Also initialize the counter that it uses
322  * to figure out what value to fill in.
323  *
324  * Note that the byte count happens to be 16-bit.
325  */
326 void
327 smb_rq_bstart(struct smb_rq *rqp)
328 {
329 	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
330 	rqp->sr_rq.mb_count = 0;
331 }
332 
333 void
334 smb_rq_bend(struct smb_rq *rqp)
335 {
336 	uint_t bcnt;
337 
338 	if (rqp->sr_bcount == NULL) {
339 		SMBSDEBUG("no bcount\n");
340 		return;
341 	}
342 	bcnt = rqp->sr_rq.mb_count;
343 	if (bcnt > 0xffff)
344 		SMBSDEBUG("byte count too large (%d)\n", bcnt);
345 	/*
346 	 * Fill in the byte count (16-bits)
347 	 * The pointer is char * type due to
348 	 * typical off-by-one alignment.
349 	 */
350 	rqp->sr_bcount[0] = bcnt & 0xFF;
351 	rqp->sr_bcount[1] = (bcnt >> 8);
352 }
353 
354 int
355 smb_rq_intr(struct smb_rq *rqp)
356 {
357 	if (rqp->sr_flags & SMBR_INTR)
358 		return (EINTR);
359 
360 	return (0);
361 #ifdef APPLE
362 	return (smb_sigintr(rqp->sr_cred->scr_vfsctx));
363 #endif
364 }
365 
366 int
367 smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
368 {
369 	*mbpp = &rqp->sr_rq;
370 	return (0);
371 }
372 
373 int
374 smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
375 {
376 	*mbpp = &rqp->sr_rp;
377 	return (0);
378 }
379 
380 static int
381 smb_rq_getenv(struct smb_connobj *co,
382 	struct smb_vc **vcpp, struct smb_share **sspp)
383 {
384 	struct smb_vc *vcp = NULL;
385 	struct smb_share *ssp = NULL;
386 	int error = 0;
387 
388 	if (co->co_flags & SMBO_GONE) {
389 		SMBSDEBUG("zombie CO\n");
390 		error = EINVAL;
391 		goto out;
392 	}
393 
394 	switch (co->co_level) {
395 	case SMBL_VC:
396 		vcp = CPTOVC(co);
397 		if (co->co_parent == NULL) {
398 			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
399 			error = EINVAL;
400 			break;
401 		}
402 		break;
403 
404 	case SMBL_SHARE:
405 		ssp = CPTOSS(co);
406 		if (co->co_parent == NULL) {
407 			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
408 			error = EINVAL;
409 			break;
410 		}
411 		error = smb_rq_getenv(co->co_parent, &vcp, NULL);
412 		break;
413 	default:
414 		SMBSDEBUG("invalid level %d passed\n", co->co_level);
415 		error = EINVAL;
416 	}
417 
418 out:
419 	if (!error) {
420 		if (vcpp)
421 			*vcpp = vcp;
422 		if (sspp)
423 			*sspp = ssp;
424 	}
425 
426 	return (error);
427 }
428 
429 /*
430  * Wait for reply on the request
431  */
432 static int
433 smb_rq_reply(struct smb_rq *rqp)
434 {
435 	struct mdchain *mdp = &rqp->sr_rp;
436 	u_int32_t tdw;
437 	u_int8_t tb;
438 	int error, rperror = 0;
439 
440 	if (rqp->sr_timo == SMBNOREPLYWAIT)
441 		return (smb_iod_removerq(rqp));
442 
443 	error = smb_iod_waitrq(rqp);
444 	if (error)
445 		return (error);
446 
447 	/*
448 	 * If the request was signed, validate the
449 	 * signature on the response.
450 	 */
451 	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
452 		error = smb_rq_verify(rqp);
453 		if (error)
454 			return (error);
455 	}
456 
457 	/*
458 	 * Parse the SMB header
459 	 */
460 	error = md_get_uint32(mdp, &tdw);
461 	if (error)
462 		return (error);
463 	error = md_get_uint8(mdp, &tb);
464 	error = md_get_uint32le(mdp, &rqp->sr_error);
465 	error = md_get_uint8(mdp, &rqp->sr_rpflags);
466 	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
467 	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
468 		/*
469 		 * Do a special check for STATUS_BUFFER_OVERFLOW;
470 		 * it's not an error.
471 		 */
472 		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
473 			/*
474 			 * Don't report it as an error to our caller;
475 			 * they can look at rqp->sr_error if they
476 			 * need to know whether we got a
477 			 * STATUS_BUFFER_OVERFLOW.
478 			 * XXX - should we do that for all errors
479 			 * where (error & 0xC0000000) is 0x80000000,
480 			 * i.e. all warnings?
481 			 */
482 			rperror = 0;
483 		} else
484 			rperror = smb_maperr32(rqp->sr_error);
485 	} else {
486 		rqp->sr_errclass = rqp->sr_error & 0xff;
487 		rqp->sr_serror = rqp->sr_error >> 16;
488 		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
489 	}
490 	if (rperror == EMOREDATA) {
491 		rperror = E2BIG;
492 		rqp->sr_flags |= SMBR_MOREDATA;
493 	} else
494 		rqp->sr_flags &= ~SMBR_MOREDATA;
495 
496 	error = md_get_uint32(mdp, &tdw);
497 	error = md_get_uint32(mdp, &tdw);
498 	error = md_get_uint32(mdp, &tdw);
499 
500 	error = md_get_uint16le(mdp, &rqp->sr_rptid);
501 	error = md_get_uint16le(mdp, &rqp->sr_rppid);
502 	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
503 	error = md_get_uint16le(mdp, &rqp->sr_rpmid);
504 
505 	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
506 	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
507 	    rqp->sr_errclass, rqp->sr_serror);
508 
509 	return ((error) ? error : rperror);
510 }
511 
512 
513 #define	ALIGN4(a)	(((a) + 3) & ~3)
514 
515 /*
516  * TRANS2 request implementation
517  * TRANS implementation is in the "t2" routines
518  * NT_TRANSACTION implementation is the separate "nt" stuff
519  */
520 int
521 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
522 	struct smb_t2rq **t2pp)
523 {
524 	struct smb_t2rq *t2p;
525 	int error;
526 
527 	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
528 	if (t2p == NULL)
529 		return (ENOMEM);
530 	error = smb_t2_init(t2p, layer, &setup, 1, scred);
531 	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
532 	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
533 	t2p->t2_flags |= SMBT2_ALLOCED;
534 	if (error) {
535 		smb_t2_done(t2p);
536 		return (error);
537 	}
538 	*t2pp = t2p;
539 	return (0);
540 }
541 
542 int
543 smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
544 	struct smb_ntrq **ntpp)
545 {
546 	struct smb_ntrq *ntp;
547 	int error;
548 
549 	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
550 	if (ntp == NULL)
551 		return (ENOMEM);
552 	error = smb_nt_init(ntp, layer, fn, scred);
553 	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
554 	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
555 	ntp->nt_flags |= SMBT2_ALLOCED;
556 	if (error) {
557 		smb_nt_done(ntp);
558 		return (error);
559 	}
560 	*ntpp = ntp;
561 	return (0);
562 }
563 
564 int
565 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
566 	int setupcnt, struct smb_cred *scred)
567 {
568 	int i;
569 	int error;
570 
571 	bzero(t2p, sizeof (*t2p));
572 	t2p->t2_source = source;
573 	t2p->t2_setupcount = (u_int16_t)setupcnt;
574 	t2p->t2_setupdata = t2p->t2_setup;
575 	for (i = 0; i < setupcnt; i++)
576 		t2p->t2_setup[i] = setup[i];
577 	t2p->t2_fid = 0xffff;
578 	t2p->t2_cred = scred;
579 	t2p->t2_share = (source->co_level == SMBL_SHARE ?
580 	    CPTOSS(source) : NULL); /* for smb up/down */
581 	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
582 	if (error)
583 		return (error);
584 	return (0);
585 }
586 
587 int
588 smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
589 	struct smb_cred *scred)
590 {
591 	int error;
592 
593 	bzero(ntp, sizeof (*ntp));
594 	ntp->nt_source = source;
595 	ntp->nt_function = fn;
596 	ntp->nt_cred = scred;
597 	ntp->nt_share = (source->co_level == SMBL_SHARE ?
598 	    CPTOSS(source) : NULL); /* for smb up/down */
599 	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
600 	if (error)
601 		return (error);
602 	return (0);
603 }
604 
605 void
606 smb_t2_done(struct smb_t2rq *t2p)
607 {
608 	mb_done(&t2p->t2_tparam);
609 	mb_done(&t2p->t2_tdata);
610 	md_done(&t2p->t2_rparam);
611 	md_done(&t2p->t2_rdata);
612 	mutex_destroy(&t2p->t2_lock);
613 	cv_destroy(&t2p->t2_cond);
614 	if (t2p->t2_flags & SMBT2_ALLOCED)
615 		kmem_free(t2p, sizeof (*t2p));
616 }
617 
618 u_int32_t
619 smb_t2_err(struct smb_t2rq *t2p)
620 {
621 	/* mask off "severity" and the "component"  bit */
622 	return (t2p->t2_sr_error & ~(0xe0000000));
623 }
624 
625 void
626 smb_nt_done(struct smb_ntrq *ntp)
627 {
628 	mb_done(&ntp->nt_tsetup);
629 	mb_done(&ntp->nt_tparam);
630 	mb_done(&ntp->nt_tdata);
631 	md_done(&ntp->nt_rparam);
632 	md_done(&ntp->nt_rdata);
633 	cv_destroy(&ntp->nt_cond);
634 	mutex_destroy(&ntp->nt_lock);
635 	if (ntp->nt_flags & SMBT2_ALLOCED)
636 		kmem_free(ntp, sizeof (*ntp));
637 }
638 
639 /*
640  * Extract data [offset,count] from mtop and add to mdp.
641  */
642 static int
643 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
644 	struct mdchain *mdp)
645 {
646 	mblk_t *n;
647 
648 	n = m_copym(mtop, offset, count, M_WAITOK);
649 	if (n == NULL)
650 		return (EBADRPC);
651 
652 	if (mdp->md_top == NULL) {
653 		md_initm(mdp, n);
654 	} else
655 		m_cat(mdp->md_top, n);
656 
657 	return (0);
658 }
659 
/*
 * Collect the (possibly multi-part) TRANS/TRANS2 reply: parse
 * each response's parameter/data counts and offsets, append the
 * pieces into t2_rparam/t2_rdata via smb_t2_placedata(), and
 * loop over additional responses until the advertised totals
 * have been received (SMBT2_ALLRECV) or an error occurs.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subseqent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	/* Totals start at max and ratchet down from each response. */
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		/* Word count: a TRANS2 response has at least 10 words. */
		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		/* Out-of-order fragments are not supported. */
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* All parameter and data bytes received? */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
788 
/*
 * Collect the (possibly multi-part) NT_TRANSACTION reply.  Same
 * structure as smb_t2_reply(), except the counts and offsets on
 * the wire are 32-bit and the minimum word count is 18.
 */
static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subseqent responses. The CIFS specification
	 * says that they can be misordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	/* Totals start at max and ratchet down from each response. */
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		/* Word count: an NT_TRANS response has at least 18 words. */
		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		/* Out-of-order fragments are not supported. */
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* All parameter and data bytes received? */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
912 
913 /*
914  * Perform a full round of TRANS2 request
915  */
916 static int
917 smb_t2_request_int(struct smb_t2rq *t2p)
918 {
919 	struct smb_vc *vcp = t2p->t2_vc;
920 	struct smb_cred *scred = t2p->t2_cred;
921 	struct mbchain *mbp;
922 	struct mdchain *mdp, mbparam, mbdata;
923 	mblk_t *m;
924 	struct smb_rq *rqp;
925 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
926 	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;
927 
928 	m = t2p->t2_tparam.mb_top;
929 	if (m) {
930 		md_initm(&mbparam, m);	/* do not free it! */
931 		totpcount = m_fixhdr(m);
932 		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
933 			return (EINVAL);
934 	} else
935 		totpcount = 0;
936 	m = t2p->t2_tdata.mb_top;
937 	if (m) {
938 		md_initm(&mbdata, m);	/* do not free it! */
939 		totdcount =  m_fixhdr(m);
940 		if (totdcount > 0xffff)
941 			return (EINVAL);
942 	} else
943 		totdcount = 0;
944 	leftdcount = totdcount;
945 	leftpcount = totpcount;
946 	txmax = vcp->vc_txmax;
947 	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
948 	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
949 	if (error)
950 		return (error);
951 	rqp->sr_timo = smb_timo_default;
952 	rqp->sr_flags |= SMBR_MULTIPACKET;
953 	t2p->t2_rq = rqp;
954 	mbp = &rqp->sr_rq;
955 	smb_rq_wstart(rqp);
956 	mb_put_uint16le(mbp, totpcount);
957 	mb_put_uint16le(mbp, totdcount);
958 	mb_put_uint16le(mbp, t2p->t2_maxpcount);
959 	mb_put_uint16le(mbp, t2p->t2_maxdcount);
960 	mb_put_uint8(mbp, t2p->t2_maxscount);
961 	mb_put_uint8(mbp, 0);			/* reserved */
962 	mb_put_uint16le(mbp, 0);			/* flags */
963 	mb_put_uint32le(mbp, 0);			/* Timeout */
964 	mb_put_uint16le(mbp, 0);			/* reserved 2 */
965 	len = mb_fixhdr(mbp);
966 
967 	/*
968 	 * Now we know the size of the trans overhead stuff:
969 	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
970 	 * where nmsize is the OTW size of the name, including
971 	 * the unicode null terminator and any alignment.
972 	 * Use this to decide which parts (and how much)
973 	 * can go into this request: params, data
974 	 */
975 	nmlen = t2p->t_name ? t2p->t_name_len : 0;
976 	nmsize = nmlen + 1; /* null term. */
977 	if (SMB_UNICODE_STRINGS(vcp)) {
978 		nmsize *= 2;
979 		/* we know put_dmem will need to align */
980 		nmsize += 1;
981 	}
982 	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
983 	if (len + leftpcount > txmax) {
984 		txpcount = min(leftpcount, txmax - len);
985 		poff = len;
986 		txdcount = 0;
987 		doff = 0;
988 	} else {
989 		txpcount = leftpcount;
990 		poff = txpcount ? len : 0;
991 		/*
992 		 * Other client traffic seems to "ALIGN2" here.  The extra
993 		 * 2 byte pad we use has no observed downside and may be
994 		 * required for some old servers(?)
995 		 */
996 		len = ALIGN4(len + txpcount);
997 		txdcount = min(leftdcount, txmax - len);
998 		doff = txdcount ? len : 0;
999 	}
1000 	leftpcount -= txpcount;
1001 	leftdcount -= txdcount;
1002 	mb_put_uint16le(mbp, txpcount);
1003 	mb_put_uint16le(mbp, poff);
1004 	mb_put_uint16le(mbp, txdcount);
1005 	mb_put_uint16le(mbp, doff);
1006 	mb_put_uint8(mbp, t2p->t2_setupcount);
1007 	mb_put_uint8(mbp, 0);
1008 	for (i = 0; i < t2p->t2_setupcount; i++) {
1009 		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
1010 	}
1011 	smb_rq_wend(rqp);
1012 	smb_rq_bstart(rqp);
1013 	if (t2p->t_name) {
1014 		/* Put the string and terminating null. */
1015 		smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
1016 		    SMB_CS_NONE, NULL);
1017 	} else {
1018 		/* nmsize accounts for padding, char size. */
1019 		mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
1020 	}
1021 	len = mb_fixhdr(mbp);
1022 	if (txpcount) {
1023 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1024 		error = md_get_mbuf(&mbparam, txpcount, &m);
1025 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1026 		if (error)
1027 			goto freerq;
1028 		mb_put_mbuf(mbp, m);
1029 	}
1030 	len = mb_fixhdr(mbp);
1031 	if (txdcount) {
1032 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1033 		error = md_get_mbuf(&mbdata, txdcount, &m);
1034 		if (error)
1035 			goto freerq;
1036 		mb_put_mbuf(mbp, m);
1037 	}
1038 	smb_rq_bend(rqp);	/* incredible, but thats it... */
1039 	error = smb_rq_enqueue(rqp);
1040 	if (error)
1041 		goto freerq;
1042 	if (leftpcount || leftdcount) {
1043 		error = smb_rq_reply(rqp);
1044 		if (error)
1045 			goto bad;
1046 		/*
1047 		 * this is an interim response, ignore it.
1048 		 */
1049 		SMBRQ_LOCK(rqp);
1050 		md_next_record(&rqp->sr_rp);
1051 		SMBRQ_UNLOCK(rqp);
1052 	}
1053 	while (leftpcount || leftdcount) {
1054 		error = smb_rq_new(rqp, t2p->t_name ?
1055 		    SMB_COM_TRANSACTION_SECONDARY :
1056 		    SMB_COM_TRANSACTION2_SECONDARY);
1057 		if (error)
1058 			goto bad;
1059 		mbp = &rqp->sr_rq;
1060 		smb_rq_wstart(rqp);
1061 		mb_put_uint16le(mbp, totpcount);
1062 		mb_put_uint16le(mbp, totdcount);
1063 		len = mb_fixhdr(mbp);
1064 		/*
1065 		 * now we have known packet size as
1066 		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
1067 		 * and need to decide which parts should go into request
1068 		 */
1069 		len = ALIGN4(len + 6 * 2 + 2);
1070 		if (t2p->t_name == NULL)
1071 			len += 2;
1072 		if (len + leftpcount > txmax) {
1073 			txpcount = min(leftpcount, txmax - len);
1074 			poff = len;
1075 			txdcount = 0;
1076 			doff = 0;
1077 		} else {
1078 			txpcount = leftpcount;
1079 			poff = txpcount ? len : 0;
1080 			len = ALIGN4(len + txpcount);
1081 			txdcount = min(leftdcount, txmax - len);
1082 			doff = txdcount ? len : 0;
1083 		}
1084 		mb_put_uint16le(mbp, txpcount);
1085 		mb_put_uint16le(mbp, poff);
1086 		mb_put_uint16le(mbp, totpcount - leftpcount);
1087 		mb_put_uint16le(mbp, txdcount);
1088 		mb_put_uint16le(mbp, doff);
1089 		mb_put_uint16le(mbp, totdcount - leftdcount);
1090 		leftpcount -= txpcount;
1091 		leftdcount -= txdcount;
1092 		if (t2p->t_name == NULL)
1093 			mb_put_uint16le(mbp, t2p->t2_fid);
1094 		smb_rq_wend(rqp);
1095 		smb_rq_bstart(rqp);
1096 		mb_put_uint8(mbp, 0);	/* name */
1097 		len = mb_fixhdr(mbp);
1098 		if (txpcount) {
1099 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1100 			error = md_get_mbuf(&mbparam, txpcount, &m);
1101 			if (error)
1102 				goto bad;
1103 			mb_put_mbuf(mbp, m);
1104 		}
1105 		len = mb_fixhdr(mbp);
1106 		if (txdcount) {
1107 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1108 			error = md_get_mbuf(&mbdata, txdcount, &m);
1109 			if (error)
1110 				goto bad;
1111 			mb_put_mbuf(mbp, m);
1112 		}
1113 		smb_rq_bend(rqp);
1114 		error = smb_iod_multirq(rqp);
1115 		if (error)
1116 			goto bad;
1117 	}	/* while left params or data */
1118 	error = smb_t2_reply(t2p);
1119 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1120 		goto bad;
1121 	mdp = &t2p->t2_rdata;
1122 	if (mdp->md_top) {
1123 		m_fixhdr(mdp->md_top);
1124 		md_initm(mdp, mdp->md_top);
1125 	}
1126 	mdp = &t2p->t2_rparam;
1127 	if (mdp->md_top) {
1128 		m_fixhdr(mdp->md_top);
1129 		md_initm(mdp, mdp->md_top);
1130 	}
1131 bad:
1132 	smb_iod_removerq(rqp);
1133 freerq:
1134 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1135 		if (rqp->sr_flags & SMBR_RESTART)
1136 			t2p->t2_flags |= SMBT2_RESTART;
1137 		md_done(&t2p->t2_rparam);
1138 		md_done(&t2p->t2_rdata);
1139 	}
1140 	smb_rq_done(rqp);
1141 	return (error);
1142 }
1143 
1144 
1145 /*
1146  * Perform a full round of NT_TRANSACTION request
1147  */
1148 static int
1149 smb_nt_request_int(struct smb_ntrq *ntp)
1150 {
1151 	struct smb_vc *vcp = ntp->nt_vc;
1152 	struct smb_cred *scred = ntp->nt_cred;
1153 	struct mbchain *mbp;
1154 	struct mdchain *mdp, mbsetup, mbparam, mbdata;
1155 	mblk_t *m;
1156 	struct smb_rq *rqp;
1157 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
1158 	int error, doff, poff, txdcount, txpcount;
1159 	int totscount;
1160 
1161 	m = ntp->nt_tsetup.mb_top;
1162 	if (m) {
1163 		md_initm(&mbsetup, m);	/* do not free it! */
1164 		totscount = m_fixhdr(m);
1165 		if (totscount > 2 * 0xff)
1166 			return (EINVAL);
1167 	} else
1168 		totscount = 0;
1169 	m = ntp->nt_tparam.mb_top;
1170 	if (m) {
1171 		md_initm(&mbparam, m);	/* do not free it! */
1172 		totpcount = m_fixhdr(m);
1173 		if (totpcount > 0x7fffffff)
1174 			return (EINVAL);
1175 	} else
1176 		totpcount = 0;
1177 	m = ntp->nt_tdata.mb_top;
1178 	if (m) {
1179 		md_initm(&mbdata, m);	/* do not free it! */
1180 		totdcount =  m_fixhdr(m);
1181 		if (totdcount > 0x7fffffff)
1182 			return (EINVAL);
1183 	} else
1184 		totdcount = 0;
1185 	leftdcount = totdcount;
1186 	leftpcount = totpcount;
1187 	txmax = vcp->vc_txmax;
1188 	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
1189 	if (error)
1190 		return (error);
1191 	rqp->sr_timo = smb_timo_default;
1192 	rqp->sr_flags |= SMBR_MULTIPACKET;
1193 	ntp->nt_rq = rqp;
1194 	mbp = &rqp->sr_rq;
1195 	smb_rq_wstart(rqp);
1196 	mb_put_uint8(mbp, ntp->nt_maxscount);
1197 	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
1198 	mb_put_uint32le(mbp, totpcount);
1199 	mb_put_uint32le(mbp, totdcount);
1200 	mb_put_uint32le(mbp, ntp->nt_maxpcount);
1201 	mb_put_uint32le(mbp, ntp->nt_maxdcount);
1202 	len = mb_fixhdr(mbp);
1203 	/*
1204 	 * now we have known packet size as
1205 	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
1206 	 * and need to decide which parts should go into the first request
1207 	 */
1208 	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
1209 	if (len + leftpcount > txmax) {
1210 		txpcount = min(leftpcount, txmax - len);
1211 		poff = len;
1212 		txdcount = 0;
1213 		doff = 0;
1214 	} else {
1215 		txpcount = leftpcount;
1216 		poff = txpcount ? len : 0;
1217 		len = ALIGN4(len + txpcount);
1218 		txdcount = min(leftdcount, txmax - len);
1219 		doff = txdcount ? len : 0;
1220 	}
1221 	leftpcount -= txpcount;
1222 	leftdcount -= txdcount;
1223 	mb_put_uint32le(mbp, txpcount);
1224 	mb_put_uint32le(mbp, poff);
1225 	mb_put_uint32le(mbp, txdcount);
1226 	mb_put_uint32le(mbp, doff);
1227 	mb_put_uint8(mbp, (totscount+1)/2);
1228 	mb_put_uint16le(mbp, ntp->nt_function);
1229 	if (totscount) {
1230 		error = md_get_mbuf(&mbsetup, totscount, &m);
1231 		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
1232 		if (error)
1233 			goto freerq;
1234 		mb_put_mbuf(mbp, m);
1235 		if (totscount & 1)
1236 			mb_put_uint8(mbp, 0); /* setup is in words */
1237 	}
1238 	smb_rq_wend(rqp);
1239 	smb_rq_bstart(rqp);
1240 	len = mb_fixhdr(mbp);
1241 	if (txpcount) {
1242 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1243 		error = md_get_mbuf(&mbparam, txpcount, &m);
1244 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1245 		if (error)
1246 			goto freerq;
1247 		mb_put_mbuf(mbp, m);
1248 	}
1249 	len = mb_fixhdr(mbp);
1250 	if (txdcount) {
1251 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1252 		error = md_get_mbuf(&mbdata, txdcount, &m);
1253 		if (error)
1254 			goto freerq;
1255 		mb_put_mbuf(mbp, m);
1256 	}
1257 	smb_rq_bend(rqp);	/* incredible, but thats it... */
1258 	error = smb_rq_enqueue(rqp);
1259 	if (error)
1260 		goto freerq;
1261 	if (leftpcount || leftdcount) {
1262 		error = smb_rq_reply(rqp);
1263 		if (error)
1264 			goto bad;
1265 		/*
1266 		 * this is an interim response, ignore it.
1267 		 */
1268 		SMBRQ_LOCK(rqp);
1269 		md_next_record(&rqp->sr_rp);
1270 		SMBRQ_UNLOCK(rqp);
1271 	}
1272 	while (leftpcount || leftdcount) {
1273 		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
1274 		if (error)
1275 			goto bad;
1276 		mbp = &rqp->sr_rq;
1277 		smb_rq_wstart(rqp);
1278 		mb_put_mem(mbp, NULL, 3, MB_MZERO);
1279 		mb_put_uint32le(mbp, totpcount);
1280 		mb_put_uint32le(mbp, totdcount);
1281 		len = mb_fixhdr(mbp);
1282 		/*
1283 		 * now we have known packet size as
1284 		 * ALIGN4(len + 6 * 4  + 2)
1285 		 * and need to decide which parts should go into request
1286 		 */
1287 		len = ALIGN4(len + 6 * 4 + 2);
1288 		if (len + leftpcount > txmax) {
1289 			txpcount = min(leftpcount, txmax - len);
1290 			poff = len;
1291 			txdcount = 0;
1292 			doff = 0;
1293 		} else {
1294 			txpcount = leftpcount;
1295 			poff = txpcount ? len : 0;
1296 			len = ALIGN4(len + txpcount);
1297 			txdcount = min(leftdcount, txmax - len);
1298 			doff = txdcount ? len : 0;
1299 		}
1300 		mb_put_uint32le(mbp, txpcount);
1301 		mb_put_uint32le(mbp, poff);
1302 		mb_put_uint32le(mbp, totpcount - leftpcount);
1303 		mb_put_uint32le(mbp, txdcount);
1304 		mb_put_uint32le(mbp, doff);
1305 		mb_put_uint32le(mbp, totdcount - leftdcount);
1306 		leftpcount -= txpcount;
1307 		leftdcount -= txdcount;
1308 		smb_rq_wend(rqp);
1309 		smb_rq_bstart(rqp);
1310 		len = mb_fixhdr(mbp);
1311 		if (txpcount) {
1312 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1313 			error = md_get_mbuf(&mbparam, txpcount, &m);
1314 			if (error)
1315 				goto bad;
1316 			mb_put_mbuf(mbp, m);
1317 		}
1318 		len = mb_fixhdr(mbp);
1319 		if (txdcount) {
1320 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1321 			error = md_get_mbuf(&mbdata, txdcount, &m);
1322 			if (error)
1323 				goto bad;
1324 			mb_put_mbuf(mbp, m);
1325 		}
1326 		smb_rq_bend(rqp);
1327 		error = smb_iod_multirq(rqp);
1328 		if (error)
1329 			goto bad;
1330 	}	/* while left params or data */
1331 	error = smb_nt_reply(ntp);
1332 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1333 		goto bad;
1334 	mdp = &ntp->nt_rdata;
1335 	if (mdp->md_top) {
1336 		m_fixhdr(mdp->md_top);
1337 		md_initm(mdp, mdp->md_top);
1338 	}
1339 	mdp = &ntp->nt_rparam;
1340 	if (mdp->md_top) {
1341 		m_fixhdr(mdp->md_top);
1342 		md_initm(mdp, mdp->md_top);
1343 	}
1344 bad:
1345 	smb_iod_removerq(rqp);
1346 freerq:
1347 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1348 		if (rqp->sr_flags & SMBR_RESTART)
1349 			ntp->nt_flags |= SMBT2_RESTART;
1350 		md_done(&ntp->nt_rparam);
1351 		md_done(&ntp->nt_rdata);
1352 	}
1353 	smb_rq_done(rqp);
1354 	return (error);
1355 }
1356 
1357 int
1358 smb_t2_request(struct smb_t2rq *t2p)
1359 {
1360 	int error = EINVAL, i;
1361 
1362 	for (i = 0; ; ) {
1363 		/*
1364 		 * Don't send any new requests if force unmount is underway.
1365 		 * This check was moved into smb_rq_enqueue, called by
1366 		 * smb_t2_request_int()
1367 		 */
1368 		t2p->t2_flags &= ~SMBT2_RESTART;
1369 		error = smb_t2_request_int(t2p);
1370 		if (!error)
1371 			break;
1372 		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1373 		    SMBT2_RESTART)
1374 			break;
1375 		if (++i > SMBMAXRESTARTS)
1376 			break;
1377 		mutex_enter(&(t2p)->t2_lock);
1378 		if (t2p->t2_share && t2p->t2_share->ss_mount) {
1379 			cv_timedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1380 			    lbolt + (hz * SMB_RCNDELAY));
1381 		} else {
1382 			delay(lbolt + (hz * SMB_RCNDELAY));
1383 		}
1384 		mutex_exit(&(t2p)->t2_lock);
1385 	}
1386 	return (error);
1387 }
1388 
1389 
1390 int
1391 smb_nt_request(struct smb_ntrq *ntp)
1392 {
1393 	int error = EINVAL, i;
1394 
1395 	for (i = 0; ; ) {
1396 		/*
1397 		 * Don't send any new requests if force unmount is underway.
1398 		 * This check was moved into smb_rq_enqueue, called by
1399 		 * smb_nt_request_int()
1400 		 */
1401 		ntp->nt_flags &= ~SMBT2_RESTART;
1402 		error = smb_nt_request_int(ntp);
1403 		if (!error)
1404 			break;
1405 		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1406 		    SMBT2_RESTART)
1407 			break;
1408 		if (++i > SMBMAXRESTARTS)
1409 			break;
1410 		mutex_enter(&(ntp)->nt_lock);
1411 		if (ntp->nt_share && ntp->nt_share->ss_mount) {
1412 			cv_timedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1413 			    lbolt + (hz * SMB_RCNDELAY));
1414 
1415 		} else {
1416 			delay(lbolt + (hz * SMB_RCNDELAY));
1417 		}
1418 		mutex_exit(&(ntp)->nt_lock);
1419 	}
1420 	return (error);
1421 }
1422