xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_rq.c (revision 40c0e2317898b8c774791bdc2b30bd50111ab1fa)
1 /*
2  * Copyright (c) 2000-2001, Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
33  */
34 
35 /*
36  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
37  * Copyright 2018 Nexenta Systems, Inc.  All rights reserved.
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/time.h>
43 #include <sys/kmem.h>
44 #include <sys/proc.h>
45 #include <sys/lock.h>
46 #include <sys/socket.h>
47 #include <sys/mount.h>
48 #include <sys/sunddi.h>
49 #include <sys/cmn_err.h>
50 #include <sys/sdt.h>
51 
52 #include <netsmb/smb_osdep.h>
53 
54 #include <netsmb/smb.h>
55 #include <netsmb/smb_conn.h>
56 #include <netsmb/smb_subr.h>
57 #include <netsmb/smb_tran.h>
58 #include <netsmb/smb_rq.h>
59 
60 /*
61  * How long to wait before restarting a request (after reconnect)
62  */
63 #define	SMB_RCNDELAY		2	/* seconds */
64 
65 /*
 * Leave this zero - we can't second-guess server side effects of
 * duplicate ops; this isn't NFS!
68  */
69 #define	SMBMAXRESTARTS		0
70 
71 
72 static int  smb_rq_reply(struct smb_rq *rqp);
73 static int  smb_rq_enqueue(struct smb_rq *rqp);
74 static int  smb_rq_getenv(struct smb_connobj *layer,
75 		struct smb_vc **vcpp, struct smb_share **sspp);
76 static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
77 static int  smb_t2_reply(struct smb_t2rq *t2p);
78 static int  smb_nt_reply(struct smb_ntrq *ntp);
79 
80 
81 /*
82  * Done with a request object.  Free its contents.
83  * If it was allocated (SMBR_ALLOCED) free it too.
84  * Some of these are stack locals, not allocated.
85  *
86  * No locks here - this is the last ref.
87  */
88 void
89 smb_rq_done(struct smb_rq *rqp)
90 {
91 
92 	/*
93 	 * No smb_vc_rele() here - see smb_rq_init()
94 	 */
95 	mb_done(&rqp->sr_rq);
96 	md_done(&rqp->sr_rp);
97 	mutex_destroy(&rqp->sr_lock);
98 	cv_destroy(&rqp->sr_cond);
99 	if (rqp->sr_flags & SMBR_ALLOCED)
100 		kmem_free(rqp, sizeof (*rqp));
101 }
102 
103 int
104 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
105 	struct smb_rq **rqpp)
106 {
107 	struct smb_rq *rqp;
108 	int error;
109 
110 	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
111 	if (rqp == NULL)
112 		return (ENOMEM);
113 	error = smb_rq_init(rqp, layer, cmd, scred);
114 	if (error) {
115 		smb_rq_done(rqp);
116 		return (error);
117 	}
118 	rqp->sr_flags |= SMBR_ALLOCED;
119 	*rqpp = rqp;
120 	return (0);
121 }
122 
/*
 * Initialize a request object (which may live on the caller's
 * stack): zero it, set up its lock and condvar, resolve the VC
 * and share pointers from "co", and compose the initial request
 * message via smb_rq_new().  Returns zero or an errno.
 * Teardown (including after a failure here) is smb_rq_done().
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL,  MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	/* Resolve the VC (and share, if co is share-level). */
	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}
156 
157 static int
158 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
159 {
160 	struct mbchain *mbp = &rqp->sr_rq;
161 	struct smb_vc *vcp = rqp->sr_vc;
162 	int error;
163 
164 	ASSERT(rqp != NULL);
165 
166 	rqp->sr_sendcnt = 0;
167 	rqp->sr_cmd = cmd;
168 
169 	mb_done(mbp);
170 	md_done(&rqp->sr_rp);
171 	error = mb_init(mbp);
172 	if (error)
173 		return (error);
174 
175 	/*
176 	 * Is this the right place to save the flags?
177 	 */
178 	rqp->sr_rqflags  = vcp->vc_hflags;
179 	rqp->sr_rqflags2 = vcp->vc_hflags2;
180 
181 	/*
182 	 * The SMB header is filled in later by
183 	 * smb_rq_fillhdr (see below)
184 	 * Just reserve space here.
185 	 */
186 	mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
187 
188 	return (0);
189 }
190 
191 /*
192  * Given a request with it's body already composed,
193  * rewind to the start and fill in the SMB header.
194  * This is called after the request is enqueued,
195  * so we have the final MID, seq num. etc.
196  */
197 void
198 smb_rq_fillhdr(struct smb_rq *rqp)
199 {
200 	struct mbchain mbtmp, *mbp = &mbtmp;
201 	mblk_t *m;
202 
203 	/*
204 	 * Fill in the SMB header using a dup of the first mblk,
205 	 * which points at the same data but has its own wptr,
206 	 * so we can rewind without trashing the message.
207 	 */
208 	m = dupb(rqp->sr_rq.mb_top);
209 	m->b_wptr = m->b_rptr;	/* rewind */
210 	mb_initm(mbp, m);
211 
212 	mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
213 	mb_put_uint8(mbp, rqp->sr_cmd);
214 	mb_put_uint32le(mbp, 0);	/* status */
215 	mb_put_uint8(mbp, rqp->sr_rqflags);
216 	mb_put_uint16le(mbp, rqp->sr_rqflags2);
217 	mb_put_uint16le(mbp, 0);	/* pid-high */
218 	mb_put_mem(mbp, NULL, 8, MB_MZERO);	/* MAC sig. (later) */
219 	mb_put_uint16le(mbp, 0);	/* reserved */
220 	mb_put_uint16le(mbp, rqp->sr_rqtid);
221 	mb_put_uint16le(mbp, rqp->sr_pid);
222 	mb_put_uint16le(mbp, rqp->sr_rquid);
223 	mb_put_uint16le(mbp, rqp->sr_mid);
224 
225 	/* This will free the mblk from dupb. */
226 	mb_done(mbp);
227 }
228 
/*
 * Convenience wrapper: run the request-reply exchange with
 * the default reply timeout (smb_timo_default seconds).
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	return (smb_rq_simple_timed(rqp, smb_timo_default));
}
234 
235 /*
236  * Simple request-reply exchange
237  */
238 int
239 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
240 {
241 	int error = EINVAL;
242 
243 	for (; ; ) {
244 		/*
245 		 * Don't send any new requests if force unmount is underway.
246 		 * This check was moved into smb_rq_enqueue.
247 		 */
248 		rqp->sr_flags &= ~SMBR_RESTART;
249 		rqp->sr_timo = timeout;	/* in seconds */
250 		rqp->sr_state = SMBRQ_NOTSENT;
251 		error = smb_rq_enqueue(rqp);
252 		if (error) {
253 			break;
254 		}
255 		error = smb_rq_reply(rqp);
256 		if (!error)
257 			break;
258 		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
259 		    SMBR_RESTART)
260 			break;
261 		if (rqp->sr_rexmit <= 0)
262 			break;
263 		SMBRQ_LOCK(rqp);
264 		if (rqp->sr_share) {
265 			(void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
266 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
267 
268 		} else {
269 			delay(SEC_TO_TICK(SMB_RCNDELAY));
270 		}
271 		SMBRQ_UNLOCK(rqp);
272 		rqp->sr_rexmit--;
273 	}
274 	return (error);
275 }
276 
277 
/*
 * Hand a composed request to the IOD: make sure the VC is
 * active (reconnecting if necessary) and the share's tree is
 * connected, record the UID and TID to use in the request, and
 * queue it via smb_iod_addrq().  Returns zero or an errno.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	/*
	 * Normal requests may initiate a reconnect,
	 * and/or wait for state changes to finish.
	 * Some requests set the NORECONNECT flag
	 * to avoid all that (i.e. tree discon)
	 */
	if (rqp->sr_flags & SMBR_NORECONNECT) {
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
			return (ENOTCONN);
		}
		if (ssp != NULL &&
		    ((ssp->ss_flags & SMBS_CONNECTED) == 0))
			return (ENOTCONN);
		goto ok_out;
	}

	/*
	 * If we're not connected, initiate a reconnect
	 * and/or wait for an existing one to finish.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		error = smb_iod_reconnect(vcp);
		if (error != 0)
			return (error);
	}

	/*
	 * If this request has a "share" object
	 * that needs a tree connect, do it now.
	 */
	if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
		error = smb_share_tcon(ssp, rqp->sr_cred);
		if (error)
			return (error);
	}

	/*
	 * We now know what UID + TID to use.
	 * Store them in the request.
	 */
ok_out:
	rqp->sr_rquid = vcp->vc_smbuid;
	rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
	error = smb_iod_addrq(rqp);

	return (error);
}
333 
334 /*
335  * Used by the IOD thread during connection setup.
336  */
337 int
338 smb_rq_internal(struct smb_rq *rqp, int timeout)
339 {
340 	struct smb_vc *vcp = rqp->sr_vc;
341 	int err;
342 
343 	rqp->sr_flags &= ~SMBR_RESTART;
344 	rqp->sr_timo = timeout;	/* in seconds */
345 	rqp->sr_state = SMBRQ_NOTSENT;
346 
347 	/*
348 	 * Skip smb_rq_enqueue(rqp) here, as we don't want it
349 	 * trying to reconnect etc.  We're doing that.
350 	 */
351 	rqp->sr_rquid = vcp->vc_smbuid;
352 	rqp->sr_rqtid = SMB_TID_UNKNOWN;
353 	err = smb_iod_addrq(rqp);
354 	if (err != 0)
355 		return (err);
356 
357 	err = smb_rq_reply(rqp);
358 
359 	return (err);
360 }
361 
362 /*
363  * Mark location of the word count, which is filled in later by
364  * smb_rw_wend().  Also initialize the counter that it uses
365  * to figure out what value to fill in.
366  *
367  * Note that the word count happens to be 8-bit.
368  */
369 void
370 smb_rq_wstart(struct smb_rq *rqp)
371 {
372 	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
373 	rqp->sr_rq.mb_count = 0;
374 }
375 
/*
 * Fill in the word count reserved by smb_rq_wstart().
 * mb_count has tracked the bytes appended since then;
 * the wire value is in 16-bit words, hence the shift.
 */
void
smb_rq_wend(struct smb_rq *rqp)
{
	uint_t wcnt;

	if (rqp->sr_wcount == NULL) {
		SMBSDEBUG("no wcount\n");
		return;
	}
	wcnt = rqp->sr_rq.mb_count;
	/* 0x1ff is the largest byte count whose word count fits in 8 bits. */
	if (wcnt > 0x1ff)
		SMBSDEBUG("word count too large (%d)\n", wcnt);
	if (wcnt & 1)
		SMBSDEBUG("odd word count\n");
	/* Fill in the word count (8-bits) */
	*rqp->sr_wcount = (wcnt >> 1);
}
393 
394 /*
395  * Mark location of the byte count, which is filled in later by
396  * smb_rw_bend().  Also initialize the counter that it uses
397  * to figure out what value to fill in.
398  *
399  * Note that the byte count happens to be 16-bit.
400  */
401 void
402 smb_rq_bstart(struct smb_rq *rqp)
403 {
404 	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
405 	rqp->sr_rq.mb_count = 0;
406 }
407 
/*
 * Fill in the byte count reserved by smb_rq_bstart().
 * mb_count has tracked the bytes appended since then.
 */
void
smb_rq_bend(struct smb_rq *rqp)
{
	uint_t bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBSDEBUG("no bcount\n");
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBSDEBUG("byte count too large (%d)\n", bcnt);
	/*
	 * Fill in the byte count (16-bits)
	 * The pointer is char * type due to
	 * typical off-by-one alignment.
	 * Stored little-endian one byte at a time, since the
	 * reserved location may not be 2-byte aligned.
	 */
	rqp->sr_bcount[0] = bcnt & 0xFF;
	rqp->sr_bcount[1] = (bcnt >> 8);
}
428 
429 int
430 smb_rq_intr(struct smb_rq *rqp)
431 {
432 	if (rqp->sr_flags & SMBR_INTR)
433 		return (EINTR);
434 
435 	return (0);
436 }
437 
/*
 * Resolve the VC (and, when "co" is share-level, the share)
 * that a connobj belongs to, storing borrowed pointers through
 * *vcpp and *sspp (no holds are taken here - see the comment
 * in smb_rq_init).  Returns EINVAL for zombie (SMBO_GONE) or
 * invalid-level objects, zero on success.
 */
static int
smb_rq_getenv(struct smb_connobj *co,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = EINVAL;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			break;
		}
		/* instead of recursion... */
		co = co->co_parent;
		/* FALLTHROUGH */
	case SMBL_VC:
		vcp = CPTOVC(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			break;
		}
		/* VC (and share, if any) are both alive. */
		error = 0;
		break;

	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
	}

out:
	/* Only publish the pointers on success. */
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}
487 
488 /*
489  * Wait for reply on the request
490  */
491 static int
492 smb_rq_reply(struct smb_rq *rqp)
493 {
494 	struct mdchain *mdp = &rqp->sr_rp;
495 	u_int8_t tb;
496 	int error, rperror = 0;
497 
498 	if (rqp->sr_timo == SMBNOREPLYWAIT) {
499 		smb_iod_removerq(rqp);
500 		return (0);
501 	}
502 
503 	error = smb_iod_waitrq(rqp);
504 	if (error)
505 		return (error);
506 
507 	/*
508 	 * If the request was signed, validate the
509 	 * signature on the response.
510 	 */
511 	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
512 		error = smb_rq_verify(rqp);
513 		if (error)
514 			return (error);
515 	}
516 
517 	/*
518 	 * Parse the SMB header
519 	 */
520 	error = md_get_uint32le(mdp, NULL);
521 	if (error)
522 		return (error);
523 	error = md_get_uint8(mdp, &tb);
524 	error = md_get_uint32le(mdp, &rqp->sr_error);
525 	error = md_get_uint8(mdp, &rqp->sr_rpflags);
526 	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
527 
528 	if (rqp->sr_error != 0) {
529 		if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
530 			rperror = smb_maperr32(rqp->sr_error);
531 		} else {
532 			uint8_t errClass = rqp->sr_error & 0xff;
533 			uint16_t errCode = rqp->sr_error >> 16;
534 			/* Convert to NT status */
535 			rqp->sr_error = smb_doserr2status(errClass, errCode);
536 			rperror = smb_maperror(errClass, errCode);
537 		}
538 	}
539 
540 	if (rperror) {
541 		/*
542 		 * Do a special check for STATUS_BUFFER_OVERFLOW;
543 		 * it's not an error.
544 		 */
545 		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
546 			/*
547 			 * Don't report it as an error to our caller;
548 			 * they can look at rqp->sr_error if they
549 			 * need to know whether we got a
550 			 * STATUS_BUFFER_OVERFLOW.
551 			 */
552 			rqp->sr_flags |= SMBR_MOREDATA;
553 			rperror = 0;
554 		}
555 	} else {
556 		rqp->sr_flags &= ~SMBR_MOREDATA;
557 	}
558 
559 	error = md_get_uint32le(mdp, NULL);
560 	error = md_get_uint32le(mdp, NULL);
561 	error = md_get_uint32le(mdp, NULL);
562 
563 	error = md_get_uint16le(mdp, &rqp->sr_rptid);
564 	error = md_get_uint16le(mdp, &rqp->sr_rppid);
565 	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
566 	error = md_get_uint16le(mdp, &rqp->sr_rpmid);
567 
568 	return ((error) ? error : rperror);
569 }
570 
571 
572 #define	ALIGN4(a)	(((a) + 3) & ~3)
573 
574 /*
575  * TRANS2 request implementation
576  * TRANS implementation is in the "t2" routines
577  * NT_TRANSACTION implementation is the separate "nt" stuff
578  */
579 int
580 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
581 	struct smb_t2rq **t2pp)
582 {
583 	struct smb_t2rq *t2p;
584 	int error;
585 
586 	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
587 	if (t2p == NULL)
588 		return (ENOMEM);
589 	error = smb_t2_init(t2p, layer, &setup, 1, scred);
590 	t2p->t2_flags |= SMBT2_ALLOCED;
591 	if (error) {
592 		smb_t2_done(t2p);
593 		return (error);
594 	}
595 	*t2pp = t2p;
596 	return (0);
597 }
598 
599 int
600 smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
601 	struct smb_ntrq **ntpp)
602 {
603 	struct smb_ntrq *ntp;
604 	int error;
605 
606 	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
607 	if (ntp == NULL)
608 		return (ENOMEM);
609 	error = smb_nt_init(ntp, layer, fn, scred);
610 	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
611 	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
612 	ntp->nt_flags |= SMBT2_ALLOCED;
613 	if (error) {
614 		smb_nt_done(ntp);
615 		return (error);
616 	}
617 	*ntpp = ntp;
618 	return (0);
619 }
620 
621 int
622 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
623 	int setupcnt, struct smb_cred *scred)
624 {
625 	int i;
626 	int error;
627 
628 	bzero(t2p, sizeof (*t2p));
629 	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
630 	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
631 
632 	t2p->t2_source = source;
633 	t2p->t2_setupcount = (u_int16_t)setupcnt;
634 	t2p->t2_setupdata = t2p->t2_setup;
635 	for (i = 0; i < setupcnt; i++)
636 		t2p->t2_setup[i] = setup[i];
637 	t2p->t2_fid = 0xffff;
638 	t2p->t2_cred = scred;
639 	t2p->t2_share = (source->co_level == SMBL_SHARE ?
640 	    CPTOSS(source) : NULL); /* for smb up/down */
641 	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
642 	if (error)
643 		return (error);
644 	return (0);
645 }
646 
647 int
648 smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
649 	struct smb_cred *scred)
650 {
651 	int error;
652 
653 	bzero(ntp, sizeof (*ntp));
654 	ntp->nt_source = source;
655 	ntp->nt_function = fn;
656 	ntp->nt_cred = scred;
657 	ntp->nt_share = (source->co_level == SMBL_SHARE ?
658 	    CPTOSS(source) : NULL); /* for smb up/down */
659 	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
660 	if (error)
661 		return (error);
662 	return (0);
663 }
664 
665 void
666 smb_t2_done(struct smb_t2rq *t2p)
667 {
668 	mb_done(&t2p->t2_tparam);
669 	mb_done(&t2p->t2_tdata);
670 	md_done(&t2p->t2_rparam);
671 	md_done(&t2p->t2_rdata);
672 	mutex_destroy(&t2p->t2_lock);
673 	cv_destroy(&t2p->t2_cond);
674 	if (t2p->t2_flags & SMBT2_ALLOCED)
675 		kmem_free(t2p, sizeof (*t2p));
676 }
677 
678 void
679 smb_nt_done(struct smb_ntrq *ntp)
680 {
681 	mb_done(&ntp->nt_tsetup);
682 	mb_done(&ntp->nt_tparam);
683 	mb_done(&ntp->nt_tdata);
684 	md_done(&ntp->nt_rparam);
685 	md_done(&ntp->nt_rdata);
686 	cv_destroy(&ntp->nt_cond);
687 	mutex_destroy(&ntp->nt_lock);
688 	if (ntp->nt_flags & SMBT2_ALLOCED)
689 		kmem_free(ntp, sizeof (*ntp));
690 }
691 
692 /*
693  * Extract data [offset,count] from mtop and add to mdp.
694  */
695 static int
696 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
697 	struct mdchain *mdp)
698 {
699 	mblk_t *n;
700 
701 	n = m_copym(mtop, offset, count, M_WAITOK);
702 	if (n == NULL)
703 		return (EBADRPC);
704 
705 	if (mdp->md_top == NULL) {
706 		md_initm(mdp, n);
707 	} else
708 		m_cat(mdp->md_top, n);
709 
710 	return (0);
711 }
712 
/*
 * Collect the (possibly multi-part) response to a TRANS/TRANS2
 * request, appending parameter and data bytes to t2p->t2_rparam
 * and t2p->t2_rdata until the server-advertised totals have
 * been received.  Saves the server status in the t2 object and
 * returns zero or an errno.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		/* Word count: the trans2 response has at least 10 words. */
		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		/* TotalParameterCount - take the smallest value seen. */
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		/* TotalDataCount - likewise. */
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		/* ParameterCount/Offset/Displacement for this part. */
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		/* DataCount/Offset/Displacement for this part. */
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* Done once the advertised totals have arrived. */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
841 
/*
 * Collect the (possibly multi-part) response to an
 * NT_TRANSACTION request, appending parameter and data bytes to
 * ntp->nt_rparam and ntp->nt_rdata until the server-advertised
 * totals have been received.  Same structure as smb_t2_reply(),
 * but with 32-bit counts/offsets.  Returns zero or an errno.
 */
static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses. The CIFS
	 * specification says that they can be misordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		/* Word count: the nt-trans response has at least 18 words. */
		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		/* TotalParameterCount - take the smallest value seen. */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		/* TotalDataCount - likewise. */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		/* ParameterCount/Offset/Displacement for this part. */
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		/* DataCount/Offset/Displacement for this part. */
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		/* Done once the advertised totals have arrived. */
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
965 
966 /*
967  * Perform a full round of TRANS2 request
968  */
969 static int
970 smb_t2_request_int(struct smb_t2rq *t2p)
971 {
972 	struct smb_vc *vcp = t2p->t2_vc;
973 	struct smb_cred *scred = t2p->t2_cred;
974 	struct mbchain *mbp;
975 	struct mdchain *mdp, mbparam, mbdata;
976 	mblk_t *m;
977 	struct smb_rq *rqp;
978 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
979 	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;
980 
981 	m = t2p->t2_tparam.mb_top;
982 	if (m) {
983 		md_initm(&mbparam, m);	/* do not free it! */
984 		totpcount = m_fixhdr(m);
985 		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
986 			return (EINVAL);
987 	} else
988 		totpcount = 0;
989 	m = t2p->t2_tdata.mb_top;
990 	if (m) {
991 		md_initm(&mbdata, m);	/* do not free it! */
992 		totdcount = m_fixhdr(m);
993 		if (totdcount > 0xffff)
994 			return (EINVAL);
995 	} else
996 		totdcount = 0;
997 	leftdcount = totdcount;
998 	leftpcount = totpcount;
999 	txmax = vcp->vc_txmax;
1000 	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
1001 	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
1002 	if (error)
1003 		return (error);
1004 	rqp->sr_timo = smb_timo_default;
1005 	rqp->sr_flags |= SMBR_MULTIPACKET;
1006 	t2p->t2_rq = rqp;
1007 	mbp = &rqp->sr_rq;
1008 	smb_rq_wstart(rqp);
1009 	mb_put_uint16le(mbp, totpcount);
1010 	mb_put_uint16le(mbp, totdcount);
1011 	mb_put_uint16le(mbp, t2p->t2_maxpcount);
1012 	mb_put_uint16le(mbp, t2p->t2_maxdcount);
1013 	mb_put_uint8(mbp, t2p->t2_maxscount);
1014 	mb_put_uint8(mbp, 0);			/* reserved */
1015 	mb_put_uint16le(mbp, 0);			/* flags */
1016 	mb_put_uint32le(mbp, 0);			/* Timeout */
1017 	mb_put_uint16le(mbp, 0);			/* reserved 2 */
1018 	len = mb_fixhdr(mbp);
1019 
1020 	/*
1021 	 * Now we know the size of the trans overhead stuff:
1022 	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
1023 	 * where nmsize is the OTW size of the name, including
1024 	 * the unicode null terminator and any alignment.
1025 	 * Use this to decide which parts (and how much)
1026 	 * can go into this request: params, data
1027 	 */
1028 	nmlen = t2p->t_name ? t2p->t_name_len : 0;
1029 	nmsize = nmlen + 1; /* null term. */
1030 	if (SMB_UNICODE_STRINGS(vcp)) {
1031 		nmsize *= 2;
1032 		/* we know put_dmem will need to align */
1033 		nmsize += 1;
1034 	}
1035 	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
1036 	if (len + leftpcount > txmax) {
1037 		txpcount = min(leftpcount, txmax - len);
1038 		poff = len;
1039 		txdcount = 0;
1040 		doff = 0;
1041 	} else {
1042 		txpcount = leftpcount;
1043 		poff = txpcount ? len : 0;
1044 		/*
1045 		 * Other client traffic seems to "ALIGN2" here.  The extra
1046 		 * 2 byte pad we use has no observed downside and may be
1047 		 * required for some old servers(?)
1048 		 */
1049 		len = ALIGN4(len + txpcount);
1050 		txdcount = min(leftdcount, txmax - len);
1051 		doff = txdcount ? len : 0;
1052 	}
1053 	leftpcount -= txpcount;
1054 	leftdcount -= txdcount;
1055 	mb_put_uint16le(mbp, txpcount);
1056 	mb_put_uint16le(mbp, poff);
1057 	mb_put_uint16le(mbp, txdcount);
1058 	mb_put_uint16le(mbp, doff);
1059 	mb_put_uint8(mbp, t2p->t2_setupcount);
1060 	mb_put_uint8(mbp, 0);
1061 	for (i = 0; i < t2p->t2_setupcount; i++) {
1062 		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
1063 	}
1064 	smb_rq_wend(rqp);
1065 	smb_rq_bstart(rqp);
1066 	if (t2p->t_name) {
1067 		/* Put the string and terminating null. */
1068 		error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
1069 		    SMB_CS_NONE, NULL);
1070 	} else {
1071 		/* nmsize accounts for padding, char size. */
1072 		error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
1073 	}
1074 	if (error)
1075 		goto freerq;
1076 	len = mb_fixhdr(mbp);
1077 	if (txpcount) {
1078 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1079 		error = md_get_mbuf(&mbparam, txpcount, &m);
1080 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1081 		if (error)
1082 			goto freerq;
1083 		mb_put_mbuf(mbp, m);
1084 	}
1085 	len = mb_fixhdr(mbp);
1086 	if (txdcount) {
1087 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1088 		error = md_get_mbuf(&mbdata, txdcount, &m);
1089 		if (error)
1090 			goto freerq;
1091 		mb_put_mbuf(mbp, m);
1092 	}
1093 	smb_rq_bend(rqp);	/* incredible, but thats it... */
1094 	error = smb_rq_enqueue(rqp);
1095 	if (error)
1096 		goto freerq;
1097 	if (leftpcount || leftdcount) {
1098 		error = smb_rq_reply(rqp);
1099 		if (error)
1100 			goto bad;
1101 		/*
1102 		 * this is an interim response, ignore it.
1103 		 */
1104 		SMBRQ_LOCK(rqp);
1105 		md_next_record(&rqp->sr_rp);
1106 		SMBRQ_UNLOCK(rqp);
1107 	}
1108 	while (leftpcount || leftdcount) {
1109 		error = smb_rq_new(rqp, t2p->t_name ?
1110 		    SMB_COM_TRANSACTION_SECONDARY :
1111 		    SMB_COM_TRANSACTION2_SECONDARY);
1112 		if (error)
1113 			goto bad;
1114 		mbp = &rqp->sr_rq;
1115 		smb_rq_wstart(rqp);
1116 		mb_put_uint16le(mbp, totpcount);
1117 		mb_put_uint16le(mbp, totdcount);
1118 		len = mb_fixhdr(mbp);
1119 		/*
1120 		 * now we have known packet size as
1121 		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
1122 		 * and need to decide which parts should go into request
1123 		 */
1124 		len = ALIGN4(len + 6 * 2 + 2);
1125 		if (t2p->t_name == NULL)
1126 			len += 2;
1127 		if (len + leftpcount > txmax) {
1128 			txpcount = min(leftpcount, txmax - len);
1129 			poff = len;
1130 			txdcount = 0;
1131 			doff = 0;
1132 		} else {
1133 			txpcount = leftpcount;
1134 			poff = txpcount ? len : 0;
1135 			len = ALIGN4(len + txpcount);
1136 			txdcount = min(leftdcount, txmax - len);
1137 			doff = txdcount ? len : 0;
1138 		}
1139 		mb_put_uint16le(mbp, txpcount);
1140 		mb_put_uint16le(mbp, poff);
1141 		mb_put_uint16le(mbp, totpcount - leftpcount);
1142 		mb_put_uint16le(mbp, txdcount);
1143 		mb_put_uint16le(mbp, doff);
1144 		mb_put_uint16le(mbp, totdcount - leftdcount);
1145 		leftpcount -= txpcount;
1146 		leftdcount -= txdcount;
1147 		if (t2p->t_name == NULL)
1148 			mb_put_uint16le(mbp, t2p->t2_fid);
1149 		smb_rq_wend(rqp);
1150 		smb_rq_bstart(rqp);
1151 		mb_put_uint8(mbp, 0);	/* name */
1152 		len = mb_fixhdr(mbp);
1153 		if (txpcount) {
1154 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1155 			error = md_get_mbuf(&mbparam, txpcount, &m);
1156 			if (error)
1157 				goto bad;
1158 			mb_put_mbuf(mbp, m);
1159 		}
1160 		len = mb_fixhdr(mbp);
1161 		if (txdcount) {
1162 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1163 			error = md_get_mbuf(&mbdata, txdcount, &m);
1164 			if (error)
1165 				goto bad;
1166 			mb_put_mbuf(mbp, m);
1167 		}
1168 		smb_rq_bend(rqp);
1169 		error = smb_iod_multirq(rqp);
1170 		if (error)
1171 			goto bad;
1172 	}	/* while left params or data */
1173 	error = smb_t2_reply(t2p);
1174 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1175 		goto bad;
1176 	mdp = &t2p->t2_rdata;
1177 	if (mdp->md_top) {
1178 		md_initm(mdp, mdp->md_top);
1179 	}
1180 	mdp = &t2p->t2_rparam;
1181 	if (mdp->md_top) {
1182 		md_initm(mdp, mdp->md_top);
1183 	}
1184 bad:
1185 	smb_iod_removerq(rqp);
1186 freerq:
1187 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1188 		if (rqp->sr_flags & SMBR_RESTART)
1189 			t2p->t2_flags |= SMBT2_RESTART;
1190 		md_done(&t2p->t2_rparam);
1191 		md_done(&t2p->t2_rdata);
1192 	}
1193 	smb_rq_done(rqp);
1194 	return (error);
1195 }
1196 
1197 
1198 /*
1199  * Perform a full round of NT_TRANSACTION request
1200  */
1201 static int
1202 smb_nt_request_int(struct smb_ntrq *ntp)
1203 {
1204 	struct smb_vc *vcp = ntp->nt_vc;
1205 	struct smb_cred *scred = ntp->nt_cred;
1206 	struct mbchain *mbp;
1207 	struct mdchain *mdp, mbsetup, mbparam, mbdata;
1208 	mblk_t *m;
1209 	struct smb_rq *rqp;
1210 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
1211 	int error, doff, poff, txdcount, txpcount;
1212 	int totscount;
1213 
1214 	m = ntp->nt_tsetup.mb_top;
1215 	if (m) {
1216 		md_initm(&mbsetup, m);	/* do not free it! */
1217 		totscount = m_fixhdr(m);
1218 		if (totscount > 2 * 0xff)
1219 			return (EINVAL);
1220 	} else
1221 		totscount = 0;
1222 	m = ntp->nt_tparam.mb_top;
1223 	if (m) {
1224 		md_initm(&mbparam, m);	/* do not free it! */
1225 		totpcount = m_fixhdr(m);
1226 		if (totpcount > 0x7fffffff)
1227 			return (EINVAL);
1228 	} else
1229 		totpcount = 0;
1230 	m = ntp->nt_tdata.mb_top;
1231 	if (m) {
1232 		md_initm(&mbdata, m);	/* do not free it! */
1233 		totdcount =  m_fixhdr(m);
1234 		if (totdcount > 0x7fffffff)
1235 			return (EINVAL);
1236 	} else
1237 		totdcount = 0;
1238 	leftdcount = totdcount;
1239 	leftpcount = totpcount;
1240 	txmax = vcp->vc_txmax;
1241 	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
1242 	if (error)
1243 		return (error);
1244 	rqp->sr_timo = smb_timo_default;
1245 	rqp->sr_flags |= SMBR_MULTIPACKET;
1246 	ntp->nt_rq = rqp;
1247 	mbp = &rqp->sr_rq;
1248 	smb_rq_wstart(rqp);
1249 	mb_put_uint8(mbp, ntp->nt_maxscount);
1250 	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
1251 	mb_put_uint32le(mbp, totpcount);
1252 	mb_put_uint32le(mbp, totdcount);
1253 	mb_put_uint32le(mbp, ntp->nt_maxpcount);
1254 	mb_put_uint32le(mbp, ntp->nt_maxdcount);
1255 	len = mb_fixhdr(mbp);
1256 	/*
1257 	 * now we have known packet size as
1258 	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
1259 	 * and need to decide which parts should go into the first request
1260 	 */
1261 	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
1262 	if (len + leftpcount > txmax) {
1263 		txpcount = min(leftpcount, txmax - len);
1264 		poff = len;
1265 		txdcount = 0;
1266 		doff = 0;
1267 	} else {
1268 		txpcount = leftpcount;
1269 		poff = txpcount ? len : 0;
1270 		len = ALIGN4(len + txpcount);
1271 		txdcount = min(leftdcount, txmax - len);
1272 		doff = txdcount ? len : 0;
1273 	}
1274 	leftpcount -= txpcount;
1275 	leftdcount -= txdcount;
1276 	mb_put_uint32le(mbp, txpcount);
1277 	mb_put_uint32le(mbp, poff);
1278 	mb_put_uint32le(mbp, txdcount);
1279 	mb_put_uint32le(mbp, doff);
1280 	mb_put_uint8(mbp, (totscount+1)/2);
1281 	mb_put_uint16le(mbp, ntp->nt_function);
1282 	if (totscount) {
1283 		error = md_get_mbuf(&mbsetup, totscount, &m);
1284 		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
1285 		if (error)
1286 			goto freerq;
1287 		mb_put_mbuf(mbp, m);
1288 		if (totscount & 1)
1289 			mb_put_uint8(mbp, 0); /* setup is in words */
1290 	}
1291 	smb_rq_wend(rqp);
1292 	smb_rq_bstart(rqp);
1293 	len = mb_fixhdr(mbp);
1294 	if (txpcount) {
1295 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1296 		error = md_get_mbuf(&mbparam, txpcount, &m);
1297 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1298 		if (error)
1299 			goto freerq;
1300 		mb_put_mbuf(mbp, m);
1301 	}
1302 	len = mb_fixhdr(mbp);
1303 	if (txdcount) {
1304 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1305 		error = md_get_mbuf(&mbdata, txdcount, &m);
1306 		if (error)
1307 			goto freerq;
1308 		mb_put_mbuf(mbp, m);
1309 	}
1310 	smb_rq_bend(rqp);	/* incredible, but thats it... */
1311 	error = smb_rq_enqueue(rqp);
1312 	if (error)
1313 		goto freerq;
1314 	if (leftpcount || leftdcount) {
1315 		error = smb_rq_reply(rqp);
1316 		if (error)
1317 			goto bad;
1318 		/*
1319 		 * this is an interim response, ignore it.
1320 		 */
1321 		SMBRQ_LOCK(rqp);
1322 		md_next_record(&rqp->sr_rp);
1323 		SMBRQ_UNLOCK(rqp);
1324 	}
1325 	while (leftpcount || leftdcount) {
1326 		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
1327 		if (error)
1328 			goto bad;
1329 		mbp = &rqp->sr_rq;
1330 		smb_rq_wstart(rqp);
1331 		mb_put_mem(mbp, NULL, 3, MB_MZERO);
1332 		mb_put_uint32le(mbp, totpcount);
1333 		mb_put_uint32le(mbp, totdcount);
1334 		len = mb_fixhdr(mbp);
1335 		/*
1336 		 * now we have known packet size as
1337 		 * ALIGN4(len + 6 * 4  + 2)
1338 		 * and need to decide which parts should go into request
1339 		 */
1340 		len = ALIGN4(len + 6 * 4 + 2);
1341 		if (len + leftpcount > txmax) {
1342 			txpcount = min(leftpcount, txmax - len);
1343 			poff = len;
1344 			txdcount = 0;
1345 			doff = 0;
1346 		} else {
1347 			txpcount = leftpcount;
1348 			poff = txpcount ? len : 0;
1349 			len = ALIGN4(len + txpcount);
1350 			txdcount = min(leftdcount, txmax - len);
1351 			doff = txdcount ? len : 0;
1352 		}
1353 		mb_put_uint32le(mbp, txpcount);
1354 		mb_put_uint32le(mbp, poff);
1355 		mb_put_uint32le(mbp, totpcount - leftpcount);
1356 		mb_put_uint32le(mbp, txdcount);
1357 		mb_put_uint32le(mbp, doff);
1358 		mb_put_uint32le(mbp, totdcount - leftdcount);
1359 		leftpcount -= txpcount;
1360 		leftdcount -= txdcount;
1361 		smb_rq_wend(rqp);
1362 		smb_rq_bstart(rqp);
1363 		len = mb_fixhdr(mbp);
1364 		if (txpcount) {
1365 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1366 			error = md_get_mbuf(&mbparam, txpcount, &m);
1367 			if (error)
1368 				goto bad;
1369 			mb_put_mbuf(mbp, m);
1370 		}
1371 		len = mb_fixhdr(mbp);
1372 		if (txdcount) {
1373 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1374 			error = md_get_mbuf(&mbdata, txdcount, &m);
1375 			if (error)
1376 				goto bad;
1377 			mb_put_mbuf(mbp, m);
1378 		}
1379 		smb_rq_bend(rqp);
1380 		error = smb_iod_multirq(rqp);
1381 		if (error)
1382 			goto bad;
1383 	}	/* while left params or data */
1384 	error = smb_nt_reply(ntp);
1385 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1386 		goto bad;
1387 	mdp = &ntp->nt_rdata;
1388 	if (mdp->md_top) {
1389 		md_initm(mdp, mdp->md_top);
1390 	}
1391 	mdp = &ntp->nt_rparam;
1392 	if (mdp->md_top) {
1393 		md_initm(mdp, mdp->md_top);
1394 	}
1395 bad:
1396 	smb_iod_removerq(rqp);
1397 freerq:
1398 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1399 		if (rqp->sr_flags & SMBR_RESTART)
1400 			ntp->nt_flags |= SMBT2_RESTART;
1401 		md_done(&ntp->nt_rparam);
1402 		md_done(&ntp->nt_rdata);
1403 	}
1404 	smb_rq_done(rqp);
1405 	return (error);
1406 }
1407 
1408 int
1409 smb_t2_request(struct smb_t2rq *t2p)
1410 {
1411 	int error = EINVAL, i;
1412 
1413 	for (i = 0; ; ) {
1414 		/*
1415 		 * Don't send any new requests if force unmount is underway.
1416 		 * This check was moved into smb_rq_enqueue, called by
1417 		 * smb_t2_request_int()
1418 		 */
1419 		t2p->t2_flags &= ~SMBT2_RESTART;
1420 		error = smb_t2_request_int(t2p);
1421 		if (!error)
1422 			break;
1423 		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1424 		    SMBT2_RESTART)
1425 			break;
1426 		if (++i > SMBMAXRESTARTS)
1427 			break;
1428 		mutex_enter(&(t2p)->t2_lock);
1429 		if (t2p->t2_share) {
1430 			(void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1431 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1432 		} else {
1433 			delay(SEC_TO_TICK(SMB_RCNDELAY));
1434 		}
1435 		mutex_exit(&(t2p)->t2_lock);
1436 	}
1437 	return (error);
1438 }
1439 
1440 
1441 int
1442 smb_nt_request(struct smb_ntrq *ntp)
1443 {
1444 	int error = EINVAL, i;
1445 
1446 	for (i = 0; ; ) {
1447 		/*
1448 		 * Don't send any new requests if force unmount is underway.
1449 		 * This check was moved into smb_rq_enqueue, called by
1450 		 * smb_nt_request_int()
1451 		 */
1452 		ntp->nt_flags &= ~SMBT2_RESTART;
1453 		error = smb_nt_request_int(ntp);
1454 		if (!error)
1455 			break;
1456 		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1457 		    SMBT2_RESTART)
1458 			break;
1459 		if (++i > SMBMAXRESTARTS)
1460 			break;
1461 		mutex_enter(&(ntp)->nt_lock);
1462 		if (ntp->nt_share) {
1463 			(void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1464 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1465 
1466 		} else {
1467 			delay(SEC_TO_TICK(SMB_RCNDELAY));
1468 		}
1469 		mutex_exit(&(ntp)->nt_lock);
1470 	}
1471 	return (error);
1472 }
1473