1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/scsi/scsi.h>
30 #include <sys/vtrace.h>
31 
32 
/*
 * Convenience macros to get from a scsi_address or scsi_pkt to the
 * HBA transport vector, or from a pkt back to its address.
 */
#define	A_TO_TRAN(ap)	((ap)->a_hba_tran)
#define	P_TO_TRAN(pkt)	((pkt)->pkt_address.a_hba_tran)
#define	P_TO_ADDR(pkt)	(&((pkt)->pkt_address))

/*
 * Callback id
 *
 * Shared by all resource-allocation routines in this file: a nonzero
 * value means callbacks were registered via ddi_set_callback() and
 * should be run (ddi_run_callback()) whenever resources are freed.
 */
uintptr_t scsi_callback_id = 0;

/* DMA attributes for scsi_alloc_consistent_buf(); defined elsewhere */
extern ddi_dma_attr_t scsi_alloc_attr;
43 
/*
 * scsi_alloc_consistent_buf():
 *	Allocate a buf(9S) whose data buffer is DMA-consistent memory,
 *	suitable for SCSI command data (request sense, inquiry, etc.).
 *
 *	ap		- target address; the HBA dip for the allocation is
 *			  taken from its transport vector
 *	in_bp		- optional caller-supplied buf to reuse; when NULL a
 *			  new one is allocated with getrbuf()
 *	datalen		- bytes of consistent memory to allocate (may be 0)
 *	bflags		- direction; only B_READ/B_WRITE are honored
 *	callback	- SLEEP_FUNC to wait for resources, NULL_FUNC, or a
 *			  function to schedule via ddi_set_callback() when
 *			  resources run out
 *	callback_arg	- argument passed to that callback
 *
 *	Returns the buf on success, or NULL on resource shortage (with the
 *	callback, if any, registered for when resources may free up).
 */
struct buf *
scsi_alloc_consistent_buf(struct scsi_address *ap,
    struct buf *in_bp, size_t datalen, uint_t bflags,
    int (*callback)(caddr_t), caddr_t callback_arg)
{
	dev_info_t	*pdip;
	struct		buf *bp;
	int		kmflag;
	size_t		rlen;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
	    "scsi_alloc_consistent_buf_start");

	if (!in_bp) {
		kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
		if ((bp = getrbuf(kmflag)) == NULL) {
			goto no_resource;
		}
	} else {
		bp = in_bp;

		/* we are establishing a new buffer memory association */
		bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
		bp->b_proc = NULL;
		bp->b_pages = NULL;
		bp->b_shadow = NULL;
	}

	/* limit bits that can be set by bflags argument */
	ASSERT(!(bflags & ~(B_READ | B_WRITE)));
	bflags &= (B_READ | B_WRITE);
	bp->b_un.b_addr = 0;

	if (datalen) {
		pdip = (A_TO_TRAN(ap))->tran_hba_dip;

		/*
		 * use i_ddi_mem_alloc() for now until we have an interface to
		 * allocate memory for DMA which doesn't require a DMA handle.
		 * ddi_iopb_alloc() is obsolete and we want more flexibility in
		 * controlling the DMA address constraints.
		 */
		/*
		 * In the SLEEP case, retry forever with a 10ms backoff per
		 * attempt; otherwise fail immediately, freeing the buf only
		 * if we allocated it ourselves above.
		 */
		while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
		    ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
		    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
			if (callback == SLEEP_FUNC) {
				delay(drv_usectohz(10000));
			} else {
				if (!in_bp)
					freerbuf(bp);
				goto no_resource;
			}
		}
		bp->b_flags |= bflags;
	}
	bp->b_bcount = datalen;
	bp->b_resid = 0;

	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
	    "scsi_alloc_consistent_buf_end");
	return (bp);

no_resource:

	/* register the caller's callback to fire when resources free up */
	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
		ddi_set_callback(callback, callback_arg,
		    &scsi_callback_id);
	}
	TRACE_0(TR_FAC_SCSI_RES,
	    TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
	    "scsi_alloc_consistent_buf_end (return1)");
	return (NULL);
}
117 
118 void
119 scsi_free_consistent_buf(struct buf *bp)
120 {
121 	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
122 	    "scsi_free_consistent_buf_start");
123 	if (!bp)
124 		return;
125 	if (bp->b_un.b_addr)
126 		i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
127 	freerbuf(bp);
128 	if (scsi_callback_id != 0) {
129 		ddi_run_callback(&scsi_callback_id);
130 	}
131 	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
132 	    "scsi_free_consistent_buf_end");
133 }
134 
135 void
136 scsi_dmafree_attr(struct scsi_pkt *pktp)
137 {
138 	struct scsi_pkt_cache_wrapper *pktw =
139 	    (struct scsi_pkt_cache_wrapper *)pktp;
140 
141 	if (pktw->pcw_flags & PCW_BOUND) {
142 		if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
143 		    DDI_SUCCESS)
144 			cmn_err(CE_WARN, "scsi_dmafree_attr: "
145 			    "unbind handle failed");
146 		pktw->pcw_flags &= ~PCW_BOUND;
147 	}
148 	pktp->pkt_numcookies = 0;
149 }
150 
151 struct buf *
152 scsi_pkt2bp(struct scsi_pkt *pkt)
153 {
154 	return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
155 }
156 
/*
 * scsi_dma_buf_bind_attr():
 *	Bind the DMA handle embedded in pktw's scsi_pkt to the data in bp
 *	and prime the window/transfer loop controls consumed by
 *	scsi_dmaget_attr().
 *
 *	Returns 1 on success; 0 on failure, with bp's error state set:
 *	b_error cleared for resource shortages, EINVAL when the request
 *	exceeds what the DMA engine can map, EFAULT for mapping faults,
 *	an in-use handle, or any unexpected status.
 */
int
scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
			struct buf	*bp,
			int		 dma_flags,
			int		(*callback)(),
			caddr_t		 arg)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);
	int	 status;

	/*
	 * First time, need to establish the handle.
	 */

	ASSERT(pktp->pkt_numcookies == 0);
	ASSERT(pktw->pcw_totalwin == 0);

	status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
	    callback, arg, &pktw->pcw_cookie,
	    &pktp->pkt_numcookies);

	switch (status) {
	case DDI_DMA_MAPPED:
		/* whole buffer mapped: exactly one DMA window */
		pktw->pcw_totalwin = 1;
		break;

	case DDI_DMA_PARTIAL_MAP:
		/* enable first call to ddi_dma_getwin */
		if (ddi_dma_numwin(pktp->pkt_handle,
		    &pktw->pcw_totalwin) != DDI_SUCCESS) {
			bp->b_error = 0;
			return (0);
		}
		break;

	case DDI_DMA_NORESOURCES:
		/*
		 * Resource shortage rather than an I/O fault: b_error is
		 * cleared instead of set via bioerror() — presumably so
		 * the caller can treat this as retryable (NOTE(review):
		 * confirm against tran_init_pkt consumers).
		 */
		bp->b_error = 0;
		return (0);

	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		return (0);

	case DDI_DMA_NOMAPPING:
	case DDI_DMA_INUSE:
	default:
		bioerror(bp, EFAULT);
		return (0);
	}

	/* initialize the loop controls for scsi_dmaget_attr() */
	pktw->pcw_curwin = 0;
	pktw->pcw_total_xfer = 0;
	pktp->pkt_dma_flags = dma_flags;
	return (1);
}
213 
#if defined(_DMA_USES_PHYSADDR)
/*
 * scsi_dmaget_attr():
 *	Advance to (and accumulate the cookies of) the next DMA window
 *	for a cached pkt whose handle was bound by
 *	scsi_dma_buf_bind_attr().  On return pkt_cookies/pkt_numcookies
 *	describe the current window's scatter/gather list and
 *	pkt_dma_len its total byte length.
 *
 *	Returns 1 on success, 0 if ddi_dma_getwin() fails.
 *
 *	NOTE(review): this reaches into ddi_dma_impl_t and assumes
 *	dmai_cookie points one past the first cookie already fetched by
 *	the bind/getwin call — verify against the DDI implementation
 *	before restructuring.
 */
int
scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
{
	struct scsi_pkt *pktp = &(pktw->pcw_pkt);

	int		status;
	int		num_segs = 0;
	ddi_dma_impl_t	*hp = (ddi_dma_impl_t *)pktp->pkt_handle;
	ddi_dma_cookie_t *cp;

	if (pktw->pcw_curwin != 0) {
		ddi_dma_cookie_t	cookie;

		/*
		 * start the next window, and get its first cookie
		 */
		status = ddi_dma_getwin(pktp->pkt_handle,
		    pktw->pcw_curwin, &pktp->pkt_dma_offset,
		    &pktp->pkt_dma_len, &cookie,
		    &pktp->pkt_numcookies);
		if (status != DDI_SUCCESS)
			return (0);
	}

	/*
	 * start the Scatter/Gather loop
	 */
	cp = hp->dmai_cookie - 1;
	pktp->pkt_dma_len = 0;
	for (;;) {

		/* take care of the loop-bookkeeping */
		pktp->pkt_dma_len += cp->dmac_size;
		num_segs++;
		/*
		 * if this was the last cookie in the current window
		 * set the loop controls start the next window and
		 * exit so the HBA can do this partial transfer
		 */
		if (num_segs >= pktp->pkt_numcookies) {
			pktw->pcw_curwin++;
			break;
		}

		cp++;
	}
	/* running total across windows; compared against b_bcount by caller */
	pktw->pcw_total_xfer += pktp->pkt_dma_len;
	pktp->pkt_cookies = hp->dmai_cookie - 1;
	hp->dmai_cookie = cp;

	return (1);
}
#endif
268 
269 void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);
270 
271 struct scsi_pkt *
272 scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
273     struct buf *bp, int cmdlen, int statuslen, int pplen,
274     int flags, int (*callback)(caddr_t), caddr_t callback_arg)
275 {
276 	struct scsi_pkt_cache_wrapper *pktw;
277 	scsi_hba_tran_t *tranp = ap->a_hba_tran;
278 	int		(*func)(caddr_t);
279 
280 	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
281 
282 	if (in_pktp == NULL) {
283 		int kf;
284 
285 		if (callback == SLEEP_FUNC)
286 			kf = KM_SLEEP;
287 		else
288 			kf = KM_NOSLEEP;
289 		pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
290 		    kf);
291 		if (pktw == NULL)
292 			goto fail1;
293 
294 		pktw->pcw_flags = 0;
295 		in_pktp = &(pktw->pcw_pkt);
296 		in_pktp->pkt_address = *ap;
297 		/*
298 		 * target drivers should initialize pkt_comp and
299 		 * pkt_time, but sometimes they don't so initialize
300 		 * them here to be safe.
301 		 */
302 		in_pktp->pkt_address = *ap;
303 		in_pktp->pkt_flags = 0;
304 		in_pktp->pkt_time = 0;
305 		in_pktp->pkt_resid = 0;
306 		in_pktp->pkt_state = 0;
307 		in_pktp->pkt_statistics = 0;
308 		in_pktp->pkt_reason = 0;
309 		in_pktp->pkt_dma_offset = 0;
310 		in_pktp->pkt_dma_len = 0;
311 		in_pktp->pkt_dma_flags = 0;
312 		in_pktp->pkt_path_instance = 0;
313 		ASSERT(in_pktp->pkt_numcookies == 0);
314 		pktw->pcw_curwin = 0;
315 		pktw->pcw_totalwin = 0;
316 		pktw->pcw_total_xfer = 0;
317 
318 		in_pktp->pkt_cdblen = cmdlen;
319 		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
320 		    (cmdlen > DEFAULT_CDBLEN)) {
321 			pktw->pcw_flags |= PCW_NEED_EXT_CDB;
322 			in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
323 			if (in_pktp->pkt_cdbp == NULL)
324 				goto fail2;
325 		}
326 		in_pktp->pkt_tgtlen = pplen;
327 		if (pplen > DEFAULT_PRIVLEN) {
328 			pktw->pcw_flags |= PCW_NEED_EXT_TGT;
329 			in_pktp->pkt_private = kmem_alloc(pplen, kf);
330 			if (in_pktp->pkt_private == NULL)
331 				goto fail3;
332 		}
333 		in_pktp->pkt_scblen = statuslen;
334 		if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
335 		    (statuslen > DEFAULT_SCBLEN)) {
336 			pktw->pcw_flags |= PCW_NEED_EXT_SCB;
337 			in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
338 			if (in_pktp->pkt_scbp == NULL)
339 				goto fail4;
340 		}
341 		if ((*tranp->tran_setup_pkt) (in_pktp,
342 		    func, NULL) == -1) {
343 				goto fail5;
344 		}
345 		if (cmdlen)
346 			bzero((void *)in_pktp->pkt_cdbp, cmdlen);
347 		if (pplen)
348 			bzero((void *)in_pktp->pkt_private, pplen);
349 		if (statuslen)
350 			bzero((void *)in_pktp->pkt_scbp, statuslen);
351 	} else
352 		pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;
353 
354 	if (bp && bp->b_bcount) {
355 
356 		int dma_flags = 0;
357 
358 		/*
359 		 * we need to transfer data, so we alloc dma resources
360 		 * for this packet
361 		 */
362 		/*CONSTCOND*/
363 		ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
364 		/*CONSTCOND*/
365 		ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);
366 
367 #if defined(_DMA_USES_PHYSADDR)
368 		/*
369 		 * with an IOMMU we map everything, so we don't
370 		 * need to bother with this
371 		 */
372 		if (tranp->tran_dma_attr.dma_attr_granular !=
373 		    pktw->pcw_granular) {
374 
375 			ddi_dma_free_handle(&in_pktp->pkt_handle);
376 			if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
377 			    &tranp->tran_dma_attr,
378 			    func, NULL,
379 			    &in_pktp->pkt_handle) != DDI_SUCCESS) {
380 
381 				in_pktp->pkt_handle = NULL;
382 				return (NULL);
383 			}
384 			pktw->pcw_granular =
385 			    tranp->tran_dma_attr.dma_attr_granular;
386 		}
387 #endif
388 
389 		if (in_pktp->pkt_numcookies == 0) {
390 			pktw->pcw_bp = bp;
391 			/*
392 			 * set dma flags; the "read" case must be first
393 			 * since B_WRITE isn't always be set for writes.
394 			 */
395 			if (bp->b_flags & B_READ) {
396 				dma_flags |= DDI_DMA_READ;
397 			} else {
398 				dma_flags |= DDI_DMA_WRITE;
399 			}
400 			if (flags & PKT_CONSISTENT)
401 				dma_flags |= DDI_DMA_CONSISTENT;
402 			if (flags & PKT_DMA_PARTIAL)
403 				dma_flags |= DDI_DMA_PARTIAL;
404 
405 #if defined(__sparc)
406 			/*
407 			 * workaround for byte hole issue on psycho and
408 			 * schizo pre 2.1
409 			 */
410 			if ((bp->b_flags & B_READ) && ((bp->b_flags &
411 			    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
412 			    (((uintptr_t)bp->b_un.b_addr & 0x7) ||
413 			    ((uintptr_t)bp->b_bcount & 0x7))) {
414 				dma_flags |= DDI_DMA_CONSISTENT;
415 			}
416 #endif
417 			if (!scsi_dma_buf_bind_attr(pktw, bp,
418 			    dma_flags, callback, callback_arg)) {
419 				return (NULL);
420 			} else {
421 				pktw->pcw_flags |= PCW_BOUND;
422 			}
423 		}
424 
425 #if defined(_DMA_USES_PHYSADDR)
426 		if (!scsi_dmaget_attr(pktw)) {
427 			scsi_dmafree_attr(in_pktp);
428 			goto fail5;
429 		}
430 #else
431 		in_pktp->pkt_cookies = &pktw->pcw_cookie;
432 		in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
433 		pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
434 #endif
435 		ASSERT(in_pktp->pkt_numcookies <=
436 		    tranp->tran_dma_attr.dma_attr_sgllen);
437 		ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
438 		in_pktp->pkt_resid = bp->b_bcount -
439 		    pktw->pcw_total_xfer;
440 
441 		ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
442 		    0);
443 	} else {
444 		/* !bp or no b_bcount */
445 		in_pktp->pkt_resid = 0;
446 	}
447 	return (in_pktp);
448 
449 fail5:
450 	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
451 		kmem_free(in_pktp->pkt_scbp, statuslen);
452 		in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
453 		    tranp->tran_hba_len + DEFAULT_PRIVLEN +
454 		    sizeof (struct scsi_pkt));
455 		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
456 			in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
457 			    DEFAULT_CDBLEN);
458 		in_pktp->pkt_scblen = 0;
459 	}
460 fail4:
461 	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
462 		kmem_free(in_pktp->pkt_private, pplen);
463 		in_pktp->pkt_tgtlen = 0;
464 		in_pktp->pkt_private = NULL;
465 	}
466 fail3:
467 	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
468 		kmem_free(in_pktp->pkt_cdbp, cmdlen);
469 		in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
470 		    tranp->tran_hba_len +
471 		    sizeof (struct scsi_pkt));
472 		in_pktp->pkt_cdblen = 0;
473 	}
474 	pktw->pcw_flags &=
475 	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
476 fail2:
477 	kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
478 fail1:
479 	if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
480 		ddi_set_callback(callback, callback_arg,
481 		    &scsi_callback_id);
482 	}
483 
484 	return (NULL);
485 }
486 
/*
 * scsi_free_cache_pkt():
 *	tran_destroy_pkt(9E) counterpart of scsi_init_cache_pkt().  Lets
 *	the HBA tear the packet down, releases any DMA binding, frees any
 *	externally-allocated cdb/private/scb areas (restoring the pointers
 *	to the default areas that trail the wrapper in the cache object),
 *	and returns the wrapper to the kmem cache.
 */
void
scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
{
	struct scsi_pkt_cache_wrapper *pktw;

	(*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
	pktw = (struct scsi_pkt_cache_wrapper *)pktp;
	if (pktw->pcw_flags & PCW_BOUND)
		scsi_dmafree_attr(pktp);

	/*
	 * if we allocated memory for anything that wouldn't fit, free
	 * the memory and restore the pointers
	 */
	/*
	 * NOTE(review): the offsets below assume the cache constructor
	 * lays out [wrapper][hba private][default priv][default cdb]
	 * [default scb] — confirm against the cache constructor before
	 * changing them.
	 */
	if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
		kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
		pktp->pkt_scbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
		if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
			pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
			    DEFAULT_CDBLEN);
		pktp->pkt_scblen = 0;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
		kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
		pktp->pkt_tgtlen = 0;
		pktp->pkt_private = NULL;
	}
	if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
		kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
		pktp->pkt_cdbp = (opaque_t)((char *)pktp +
		    (A_TO_TRAN(ap))->tran_hba_len +
		    sizeof (struct scsi_pkt_cache_wrapper));
		pktp->pkt_cdblen = 0;
	}
	pktw->pcw_flags &=
	    ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
	kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);

	/* resources were freed; run any pending resource callbacks */
	if (scsi_callback_id != 0) {
		ddi_run_callback(&scsi_callback_id);
	}

}
532 
533 
534 struct scsi_pkt *
535 scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
536     struct buf *bp, int cmdlen, int statuslen, int pplen,
537     int flags, int (*callback)(caddr_t), caddr_t callback_arg)
538 {
539 	struct scsi_pkt *pktp;
540 	scsi_hba_tran_t *tranp = ap->a_hba_tran;
541 	int		(*func)(caddr_t);
542 
543 	TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
544 "scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
545 	    ap, in_pktp, cmdlen, statuslen, pplen);
546 
547 #if defined(__i386) || defined(__amd64)
548 	if (flags & PKT_CONSISTENT_OLD) {
549 		flags &= ~PKT_CONSISTENT_OLD;
550 		flags |= PKT_CONSISTENT;
551 	}
552 #endif
553 
554 	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
555 
556 	pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
557 	    statuslen, pplen, flags, func, NULL);
558 	if (pktp == NULL) {
559 		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
560 			ddi_set_callback(callback, callback_arg,
561 			    &scsi_callback_id);
562 		}
563 	}
564 
565 	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
566 	    "scsi_init_pkt_end: pktp %p", pktp);
567 	return (pktp);
568 }
569 
570 void
571 scsi_destroy_pkt(struct scsi_pkt *pkt)
572 {
573 	struct scsi_address	*ap = P_TO_ADDR(pkt);
574 
575 	TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
576 	    "scsi_destroy_pkt_start: pkt %p", pkt);
577 
578 	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);
579 
580 	if (scsi_callback_id != 0) {
581 		ddi_run_callback(&scsi_callback_id);
582 	}
583 
584 	TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
585 	    "scsi_destroy_pkt_end");
586 }
587 
588 
589 /*
590  *	Generic Resource Allocation Routines
591  */
592 
593 struct scsi_pkt *
594 scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
595     opaque_t dmatoken, int (*callback)())
596 {
597 	register struct	scsi_pkt *pkt;
598 	register scsi_hba_tran_t *tranp = ap->a_hba_tran;
599 	register int			(*func)(caddr_t);
600 
601 	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
602 
603 	pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
604 	    cmdlen, statuslen, 0, 0, func, NULL);
605 	if (pkt == NULL) {
606 		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
607 			ddi_set_callback(callback, NULL, &scsi_callback_id);
608 		}
609 	}
610 
611 	return (pkt);
612 }
613 
614 struct scsi_pkt *
615 scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
616     int (*callback)())
617 {
618 	struct scsi_pkt		*pkt;
619 	struct scsi_hba_tran	*tran = ap->a_hba_tran;
620 	register int			(*func)(caddr_t);
621 
622 	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
623 
624 	pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
625 	    statuslen, 0, 0, func, NULL);
626 	if (pkt == NULL) {
627 		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
628 			ddi_set_callback(callback, NULL, &scsi_callback_id);
629 		}
630 	}
631 
632 	return (pkt);
633 }
634 
635 struct scsi_pkt *
636 scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
637 {
638 	struct scsi_pkt		*new_pkt;
639 	register int		(*func)(caddr_t);
640 
641 	func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
642 
643 	new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
644 	    pkt, (struct buf *)dmatoken,
645 	    0, 0, 0, 0, func, NULL);
646 	ASSERT(new_pkt == pkt || new_pkt == NULL);
647 	if (new_pkt == NULL) {
648 		if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
649 			ddi_set_callback(callback, NULL, &scsi_callback_id);
650 		}
651 	}
652 
653 	return (new_pkt);
654 }
655 
656 
657 /*
658  *	Generic Resource Deallocation Routines
659  */
660 
661 void
662 scsi_dmafree(struct scsi_pkt *pkt)
663 {
664 	register struct scsi_address	*ap = P_TO_ADDR(pkt);
665 
666 	(*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);
667 
668 	if (scsi_callback_id != 0) {
669 		ddi_run_callback(&scsi_callback_id);
670 	}
671 }
672 
673 /*ARGSUSED*/
674 void
675 scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
676 {
677 	ASSERT(pkt->pkt_numcookies == 0);
678 	ASSERT(pkt->pkt_handle != NULL);
679 	scsi_dmafree_attr(pkt);
680 
681 	if (scsi_callback_id != 0) {
682 		ddi_run_callback(&scsi_callback_id);
683 	}
684 }
685 
686 void
687 scsi_sync_pkt(struct scsi_pkt *pkt)
688 {
689 	register struct scsi_address	*ap = P_TO_ADDR(pkt);
690 
691 	if (pkt->pkt_state & STATE_XFERRED_DATA)
692 		(*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
693 }
694 
695 /*ARGSUSED*/
696 void
697 scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
698 {
699 	if (pkt->pkt_handle &&
700 	    (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
701 		(void) ddi_dma_sync(pkt->pkt_handle,
702 		    pkt->pkt_dma_offset, pkt->pkt_dma_len,
703 		    (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
704 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
705 	}
706 }
707 
708 void
709 scsi_resfree(struct scsi_pkt *pkt)
710 {
711 	register struct scsi_address	*ap = P_TO_ADDR(pkt);
712 	(*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);
713 
714 	if (scsi_callback_id != 0) {
715 		ddi_run_callback(&scsi_callback_id);
716 	}
717 }
718