xref: /illumos-gate/usr/src/uts/common/io/1394/adapters/hci1394_ixl_comp.c (revision ffc2b7d4ae69a2eeeab283452dc5c0d70ce7519f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * hci1394_ixl_comp.c
29  *    Isochronous IXL Compiler.
30  *    The compiler converts the general hardware independent IXL command
31  *    blocks into OpenHCI DMA descriptors.
32  */
33 
34 #include <sys/kmem.h>
35 #include <sys/types.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 
40 #include <sys/tnf_probe.h>
41 
42 #include <sys/1394/h1394.h>
43 #include <sys/1394/ixl1394.h>
44 #include <sys/1394/adapters/hci1394.h>
45 
46 /* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
47 #define	HCI1394_IXL_PAGESIZE	8000
48 
49 /* invalid opcode */
50 #define	IXL1394_OP_INVALID  (0 | IXL1394_OPTY_OTHER)
51 
52 /*
53  * maximum number of interrupts permitted for a single context in which
54  * the context does not advance to the next DMA descriptor.  Interrupts are
55  * triggered by 1) hardware completing a DMA descriptor block which has the
56  * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
57  * interrupt.  Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
58  * returned.
59  */
60 int hci1394_ixl_max_noadv_intrs = 8;
61 
62 
63 static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
64     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
65     ixl1394_command_t *ixlp);
66 static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
67 static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
68     ixl1394_command_t *ixlp);
69 static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
70 static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
71 static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
72 static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
73 static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
74 static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
75 static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
76 static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
77 static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
78     caddr_t *dma_descpp, uint32_t *dma_desc_bound);
79 static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
80 static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
81 static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
82 static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
83     uint32_t bufp, uint16_t size);
84 static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
85     uint32_t count);
86 static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
87 static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
88 static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
89     uint32_t dmacnt);
90 static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
91     uint32_t size, uint32_t *dma_bound);
92 static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
93 
94 
95 /*
96  * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
97  * Receive Only:			Transmit Only:
98  *    IXL1394_OP_RECV_PKT_ST		    IXL1394_OP_SEND_PKT_WHDR_ST
99  *    IXL1394_OP_RECV_PKT		    IXL1394_OP_SEND_PKT_ST
100  *    IXL1394_OP_RECV_BUF		    IXL1394_OP_SEND_PKT
101  *    IXL1394_OP_SET_SYNCWAIT		    IXL1394_OP_SEND_BUF
102  *					    IXL1394_OP_SEND_HDR_ONLY
103  * Receive or Transmit:			    IXL1394_OP_SEND_NO_PKT
104  *    IXL1394_OP_CALLBACK		    IXL1394_OP_SET_TAGSYNC
105  *    IXL1394_OP_LABEL			    IXL1394_OP_SET_SKIPMODE
106  *    IXL1394_OP_JUMP			    IXL1394_OP_STORE_TIMESTAMP
107  */
108  */
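/*
 * For illustration only (not a program from this driver), a minimal
 * receive-mode IXL program that loops forever over one buffer could be
 * linked as:
 *
 *    IXL1394_OP_LABEL     <------+
 *    IXL1394_OP_RECV_BUF         |
 *    IXL1394_OP_CALLBACK         |
 *    IXL1394_OP_JUMP     --------+
 *
 * Pass 1 turns the RECV_BUF into one or more DMA descriptor blocks;
 * pass 2 resolves the JUMP back to the LABEL into a descriptor branch
 * address, and the CALLBACK on the execution path causes interrupt
 * enable to be set in the xfer's last descriptor block.
 */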
109 /*
110  * hci1394_compile_ixl()
111  *    Top level ixl compiler entry point.  Scans ixl and builds openHCI 1.0
112  *    descriptor blocks in dma memory.
113  */
114 int
115 hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
116     ixl1394_command_t *ixlp, int *resultp)
117 {
118 	hci1394_comp_ixl_vars_t wv;	/* working variables used throughout */
119 
120 	ASSERT(soft_statep != NULL);
121 	ASSERT(ctxtp != NULL);
122 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_enter,
123 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
124 
125 	/* Initialize compiler working variables */
126 	hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
127 
128 	/*
129 	 * First pass:
130 	 *    Parse ixl commands, building desc blocks, until end of IXL
131 	 *    linked list.
132 	 */
133 	hci1394_parse_ixl(&wv, ixlp);
134 
135 	/*
136 	 * Second pass:
137 	 *    Resolve all generated descriptor block jump and skip addresses.
138 	 *    Set interrupt enable in descriptor blocks which have callback
139 	 *    operations in their execution scope. (Previously store_timestamp
140 	 *    operations were counted also.) Set interrupt enable in descriptor
141 	 *    blocks which were introduced by an ixl label command.
142 	 */
143 	if (wv.dma_bld_error == 0) {
144 		hci1394_finalize_all_xfer_desc(&wv);
145 	}
146 
147 	/* Endup: finalize and cleanup ixl compile, return result */
148 	hci1394_compile_ixl_endup(&wv);
149 
150 	*resultp = wv.dma_bld_error;
151 	if (*resultp != 0) {
152 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
153 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
154 		return (DDI_FAILURE);
155 	} else {
156 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
157 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
158 		return (DDI_SUCCESS);
159 	}
160 }
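/*
 * Usage sketch (illustrative only; assumes soft_statep, ctxtp and the
 * head of an IXL program, ixl_head, were set up during isoch context
 * allocation):
 *
 *	int result;
 *
 *	if (hci1394_compile_ixl(soft_statep, ctxtp, ixl_head,
 *	    &result) != DDI_SUCCESS) {
 *		result holds an IXL1394_E* code, e.g.
 *		IXL1394_ENO_DATA_PKTS or IXL1394_EMEM_ALLOC_FAIL
 *	}
 */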
161 
162 /*
163  * hci1394_compile_ixl_init()
164  *    Initialize the isoch context structure associated with the IXL
165  *    program, and initialize the temporary working variables structure.
166  */
167 static void
168 hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
169     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
170     ixl1394_command_t *ixlp)
171 {
172 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_enter,
173 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
174 
175 	/* initialize common recv/xmit compile values */
176 	wvp->soft_statep = soft_statep;
177 	wvp->ctxtp = ctxtp;
178 
179 	/* init/clear ctxtp values */
180 	ctxtp->dma_mem_execp = 0;
181 	ctxtp->dma_firstp = NULL;
182 	ctxtp->dma_last_time = 0;
183 	ctxtp->xcs_firstp = NULL;
184 	ctxtp->ixl_exec_depth = 0;
185 	ctxtp->ixl_execp = NULL;
186 	ctxtp->ixl_firstp = ixlp;
187 	ctxtp->default_skipxferp = NULL;
188 
189 	/*
190 	 * the context's max_noadv_intrs is set here instead of in isoch init
191 	 * because the default is patchable and would only be picked up this way
192 	 */
193 	ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
194 
195 	/* init working variables */
196 	wvp->xcs_firstp = NULL;
197 	wvp->xcs_currentp = NULL;
198 
199 	wvp->dma_firstp = NULL;
200 	wvp->dma_currentp = NULL;
201 	wvp->dma_bld_error = 0;
202 
203 	wvp->ixl_io_mode = ctxtp->ctxt_flags;
204 	wvp->ixl_cur_cmdp = NULL;
205 	wvp->ixl_cur_xfer_stp = NULL;
206 	wvp->ixl_cur_labelp = NULL;
207 
208 	wvp->ixl_xfer_st_cnt = 0;	/* count of xfer start commands found */
209 	wvp->xfer_state = XFER_NONE;	/* none, pkt, buf, skip, hdronly */
210 	wvp->xfer_hci_flush = 0;	/* updateable - xfer, jump, set */
211 	wvp->xfer_pktlen = 0;
212 	wvp->xfer_bufcnt = 0;
213 	wvp->descriptors = 0;
214 
215 	/* START RECV ONLY SECTION */
216 	wvp->ixl_setsyncwait_cnt = 0;
217 
218 	/* START XMIT ONLY SECTION */
219 	wvp->ixl_settagsync_cmdp = NULL;
220 	wvp->ixl_setskipmode_cmdp = NULL;
221 	wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
222 	wvp->default_skiplabelp = ctxtp->default_skiplabelp;
223 	wvp->default_skipxferp = NULL;
224 	wvp->skipmode = ctxtp->default_skipmode;
225 	wvp->skiplabelp = NULL;
226 	wvp->skipxferp = NULL;
227 	wvp->default_tag = ctxtp->default_tag;
228 	wvp->default_sync = ctxtp->default_sync;
229 	wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
230 	wvp->storevalue_data = 0;
231 	wvp->xmit_pkthdr1 = 0;
232 	wvp->xmit_pkthdr2 = 0;
233 	/* END XMIT ONLY SECTION */
234 
235 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_exit,
236 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
237 }
238 
239 /*
240  * hci1394_compile_ixl_endup()
241  *    This routine is called just before the main hci1394_compile_ixl() exits.
242  *    It checks for errors and performs the appropriate cleanup, or it rolls any
243  *    relevant info from the working variables struct into the context structure
244  */
245 static void
246 hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
247 {
248 	ixl1394_command_t *ixl_exec_stp;
249 	hci1394_idma_desc_mem_t *dma_nextp;
250 	int err;
251 
252 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_enter,
253 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
254 
255 	/* error if no descriptor blocks found in ixl & created in dma memory */
256 	if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
257 		TNF_PROBE_1(hci1394_compile_ixl_endup_nodata_error,
258 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
259 		    "IXL1394_ENO_DATA_PKTS: prog has no data packets");
260 
261 		wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
262 	}
263 
264 	/* if no errors yet, find the first IXL command that's a transfer cmd */
265 	if (wvp->dma_bld_error == 0) {
266 		err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
267 		    NULL, &ixl_exec_stp);
268 
269 		/* error if a label<->jump loop, or no xfer */
270 		if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
271 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
272 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
273 			    "IXL1394_ENO_DATA_PKTS: loop or no xfer detected");
274 
275 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
276 		}
277 	}
278 
279 	/* Sync all the DMA descriptor buffers */
280 	dma_nextp = wvp->ctxtp->dma_firstp;
281 	while (dma_nextp != NULL) {
282 		err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
283 		    (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
284 		    DDI_DMA_SYNC_FORDEV);
285 		if (err != DDI_SUCCESS) {
286 			wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
287 
288 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
289 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
290 			    "IXL1394_INTERNAL_ERROR: dma_sync() failed");
291 			break;
292 		}
293 
294 		/* advance to next dma memory descriptor */
295 		dma_nextp = dma_nextp->dma_nextp;
296 	}
297 
298 	/*
299 	 * If error, cleanup and return. delete all allocated xfer_ctl structs
300 	 * and all dma descriptor page memory and its dma memory blocks too.
301 	 */
302 	if (wvp->dma_bld_error != 0) {
303 		wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
304 		wvp->ctxtp->dma_firstp = wvp->dma_firstp;
305 		hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
306 
307 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
308 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
309 		return;
310 	}
311 
312 	/* can only get to here if the first ixl transfer command is found */
313 
314 	/* set required processing vars into ctxtp struct */
315 	wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
316 	wvp->ctxtp->dma_mem_execp = 0;
317 
318 	/*
319 	 * the transfer command's compiler private xfer_ctl structure has the
320 	 * appropriate bound address
321 	 */
322 	wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
323 	    ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
324 	wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
325 	wvp->ctxtp->dma_firstp = wvp->dma_firstp;
326 	wvp->ctxtp->dma_last_time = 0;
327 	wvp->ctxtp->ixl_exec_depth = 0;
328 	wvp->ctxtp->ixl_execp = NULL;
329 
330 	/* compile done */
331 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
332 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
333 }
334 
335 /*
336  * hci1394_parse_ixl()
337  *    Scan IXL program and build ohci DMA descriptor blocks in dma memory.
338  *
339  *    Parse/process succeeding ixl commands until end of IXL linked list is
340  *    reached. Evaluate ixl syntax and build (xmit or recv) descriptor
341  *    blocks.  To aid execution time evaluation of current location, enable
342  *    status recording on each descriptor block built.
343  *    On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
344  */
345 static void
346 hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
347 {
348 	ixl1394_command_t *ixlnextp = ixlp;	/* addr of next ixl cmd */
349 	ixl1394_command_t *ixlcurp = NULL;	/* addr of current ixl cmd */
350 	uint16_t ixlopcode = 0;			/* opcode of current ixl cmd */
351 
352 	uint32_t pktsize;
353 	uint32_t pktcnt;
354 
355 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_enter, HCI1394_TNF_HAL_STACK_ISOCH,
356 	    "");
357 
358 	/* follow ixl links until reach end or find error */
359 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
360 
361 		/* set this command as the current ixl command */
362 		wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
363 		ixlnextp = ixlcurp->next_ixlp;
364 
365 		ixlopcode = ixlcurp->ixl_opcode;
366 
367 		/* init compiler controlled values in current ixl command */
368 		ixlcurp->compiler_privatep = NULL;
369 		ixlcurp->compiler_resv = 0;
370 
371 		/* error if xmit/recv mode not appropriate for current cmd */
372 		if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
373 		    ((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
374 		    (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
375 		    ((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
376 
377 			/* check if command op failed because it was invalid */
378 			if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
379 				TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
380 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
381 				    errmsg, "IXL1394_BAD_IXL_OPCODE",
382 				    tnf_opaque, ixl_commandp, ixlcurp,
383 				    tnf_opaque, ixl_opcode, ixlopcode);
384 
385 				wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
386 			} else {
387 				TNF_PROBE_3(hci1394_parse_ixl_mode_error,
388 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
389 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
390 				    "invalid ixlop in mode", tnf_uint, io_mode,
391 				    wvp->ixl_io_mode, tnf_opaque, ixl_opcode,
392 				    ixlopcode);
393 
394 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
395 			}
396 			continue;
397 		}
398 
399 		/*
400 		 * if ends xfer flag set, finalize current xfer descriptor
401 		 * block build
402 		 */
403 		if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
404 			/* finalize any descriptor block build in progress */
405 			hci1394_finalize_cur_xfer_desc(wvp);
406 
407 			if (wvp->dma_bld_error != 0) {
408 				continue;
409 			}
410 		}
411 
412 		/*
413 		 * now process based on specific opcode value
414 		 */
415 		switch (ixlopcode) {
416 
417 		case IXL1394_OP_RECV_BUF:
418 		case IXL1394_OP_RECV_BUF_U: {
419 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
420 
421 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
422 
423 			/*
424 			 * In packet-per-buffer mode:
425 			 *    This ixl command builds a collection of xfer
426 			 *    descriptor blocks (size/pkt_size of them) each to
427 			 *    recv a packet whose buffer size is pkt_size and
428 			 *    whose buffer ptr is (pktcur*pkt_size + bufp)
429 			 *
430 			 * In buffer fill mode:
431 			 *    This ixl command builds a single xfer descriptor
432 			 *    block to recv as many packets or parts of packets
433 			 *    as can fit into the buffer size specified
434 			 *    (pkt_size is not used).
435 			 */
436 
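			/*
			 * For example (values illustrative only): in
			 * packet-per-buffer mode, size = 4000 with
			 * pkt_size = 1000 passes the check below and
			 * builds 4 descriptor blocks, receiving into
			 * buffer offsets 0, 1000, 2000 and 3000.
			 */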
437 			/* set xfer_state for new descriptor block build */
438 			wvp->xfer_state = XFER_BUF;
439 
440 			/* set this ixl command as current xferstart command */
441 			wvp->ixl_cur_xfer_stp = ixlcurp;
442 
443 			/*
444 			 * perform packet-per-buffer checks
445 			 * (no checks needed when in buffer fill mode)
446 			 */
447 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
448 
449 				/* the packets must use the buffer exactly */
450 				pktsize = cur_xfer_buf_ixlp->pkt_size;
451 				pktcnt = 0;
452 				if (pktsize != 0) {
453 					pktcnt = cur_xfer_buf_ixlp->size /
454 					    pktsize;
455 				}
456 				if ((pktcnt == 0) || ((pktsize * pktcnt) !=
457 				    cur_xfer_buf_ixlp->size)) {
458 
459 					TNF_PROBE_3(hci1394_parse_ixl_rat_error,
460 					    HCI1394_TNF_HAL_ERROR_ISOCH, "",
461 					    tnf_string, errmsg,
462 					    "IXL1394_EPKTSIZE_RATIO", tnf_int,
463 					    buf_size, cur_xfer_buf_ixlp->size,
464 					    tnf_int, pkt_size, pktsize);
465 
466 					wvp->dma_bld_error =
467 					    IXL1394_EPKTSIZE_RATIO;
468 					continue;
469 				}
470 			}
471 
472 			/*
473 			 * set buffer pointer & size into first xfer_bufp
474 			 * and xfer_size
475 			 */
476 			if (hci1394_set_next_xfer_buf(wvp,
477 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
478 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
479 
480 				/* wvp->dma_bld_error is set by above call */
481 				continue;
482 			}
483 			break;
484 		}
485 
486 		case IXL1394_OP_RECV_PKT_ST:
487 		case IXL1394_OP_RECV_PKT_ST_U: {
488 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
489 
490 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
491 
492 			/* error if in buffer fill mode */
493 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
494 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
495 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
496 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
497 				    "RECV_PKT_ST used in BFFILL mode");
498 
499 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
500 				continue;
501 			}
502 
503 			/* set xfer_state for new descriptor block build */
504 			/* set this ixl command as current xferstart command */
505 			wvp->xfer_state = XFER_PKT;
506 			wvp->ixl_cur_xfer_stp = ixlcurp;
507 
508 			/*
509 			 * set buffer pointer & size into first xfer_bufp
510 			 * and xfer_size
511 			 */
512 			if (hci1394_set_next_xfer_buf(wvp,
513 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
514 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
515 
516 				/* wvp->dma_bld_error is set by above call */
517 				continue;
518 			}
519 			break;
520 		}
521 
522 		case IXL1394_OP_RECV_PKT:
523 		case IXL1394_OP_RECV_PKT_U: {
524 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
525 
526 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
527 
528 			/* error if in buffer fill mode */
529 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
530 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
531 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
532 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
533 				    "RECV_PKT used in BFFILL mode");
534 
535 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
536 				continue;
537 			}
538 
539 			/* error if xfer_state not xfer pkt */
540 			if (wvp->xfer_state != XFER_PKT) {
541 				TNF_PROBE_1(hci1394_parse_ixl_misplacercv_error,
542 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
543 				    errmsg, "IXL1394_EMISPLACED_RECV: "
544 				    "RECV_PKT without RECV_PKT_ST");
545 
546 				wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
547 				continue;
548 			}
549 
550 			/*
551 			 * save xfer start cmd ixl ptr in compiler_privatep
552 			 * field of this cmd
553 			 */
554 			ixlcurp->compiler_privatep = (void *)
555 			    wvp->ixl_cur_xfer_stp;
556 
557 			/*
558 			 * save pkt index [1-n] in compiler_resv field of
559 			 * this cmd
560 			 */
561 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
562 
563 			/*
564 			 * set buffer pointer & size into next xfer_bufp
565 			 * and xfer_size
566 			 */
567 			if (hci1394_set_next_xfer_buf(wvp,
568 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
569 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
570 
571 				/* wvp->dma_bld_error is set by above call */
572 				continue;
573 			}
574 
575 			/*
576 			 * set updateable xfer cache flush eval flag if
577 			 * updateable opcode
578 			 */
579 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
580 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
581 			}
582 			break;
583 		}
584 
585 		case IXL1394_OP_SEND_BUF:
586 		case IXL1394_OP_SEND_BUF_U: {
587 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
588 
589 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
590 
591 			/*
592 			 * These send_buf commands build a collection of xmit
593 			 * descriptor blocks (size/pkt_size of them) each to
594 			 * xfer a packet whose buffer size is pkt_size and whose
595 			 * buffer pt is (pktcur*pkt_size + bufp). (ptr and size
596 			 * are adjusted if they have header form of ixl cmd)
597 			 */
598 
599 			/* set xfer_state for new descriptor block build */
600 			wvp->xfer_state = XFER_BUF;
601 
602 			/* set this ixl command as current xferstart command */
603 			wvp->ixl_cur_xfer_stp = ixlcurp;
604 
605 			/* the packets must use the buffer exactly, or error */
606 			pktsize = cur_xfer_buf_ixlp->pkt_size;
607 			pktcnt = 0;
608 			if (pktsize != 0) {
609 				pktcnt = cur_xfer_buf_ixlp->size / pktsize;
610 			}
611 			if ((pktcnt == 0) || ((pktsize * pktcnt) !=
612 			    cur_xfer_buf_ixlp->size)) {
613 
614 				TNF_PROBE_3(hci1394_parse_ixl_rat_error,
615 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
616 				    errmsg, "IXL1394_EPKTSIZE_RATIO", tnf_int,
617 				    buf_size, cur_xfer_buf_ixlp->size, tnf_int,
618 				    pkt_size, pktsize);
619 
620 				wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
621 				continue;
622 			}
623 
624 			/* set buf ptr & size into 1st xfer_bufp & xfer_size */
625 			if (hci1394_set_next_xfer_buf(wvp,
626 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
627 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
628 
629 				/* wvp->dma_bld_error is set by above call */
630 				continue;
631 			}
632 			break;
633 		}
634 
635 		case IXL1394_OP_SEND_PKT_ST:
636 		case IXL1394_OP_SEND_PKT_ST_U: {
637 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
638 
639 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
640 
641 			/* set xfer_state for new descriptor block build */
642 			/* set this ixl command as current xferstart command */
643 			wvp->xfer_state = XFER_PKT;
644 			wvp->ixl_cur_xfer_stp = ixlcurp;
645 
646 			/*
647 			 * set buffer pointer & size into first xfer_bufp and
648 			 * xfer_size
649 			 */
650 			if (hci1394_set_next_xfer_buf(wvp,
651 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
652 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
653 
654 				/* wvp->dma_bld_error is set by above call */
655 				continue;
656 			}
657 			break;
658 		}
659 
660 		case IXL1394_OP_SEND_PKT_WHDR_ST:
661 		case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
662 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
663 
664 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
665 
666 			/* set xfer_state for new descriptor block build */
667 			/* set this ixl command as current xferstart command */
668 			wvp->xfer_state = XFER_PKT;
669 			wvp->ixl_cur_xfer_stp = ixlcurp;
670 
671 			/*
672 			 * buffer size must be at least 4 (must include header),
673 			 * else error
674 			 */
675 			if (cur_xfer_pkt_ixlp->size < 4) {
676 				TNF_PROBE_2(hci1394_parse_ixl_hdr_missing_error,
677 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
678 				    errmsg, "IXL1394_EPKT_HDR_MISSING", tnf_int,
679 				    pkt_size, cur_xfer_pkt_ixlp->size);
680 
681 				wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
682 				continue;
683 			}
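			/*
			 * In the WHDR form the first quadlet (4 bytes) of
			 * the supplied buffer holds the packet header, so
			 * the data xfer set below starts 4 bytes in.
			 */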
684 
685 			/*
686 			 * set buffer and size(excluding header) into first
687 			 * xfer_bufp and xfer_size
688 			 */
689 			if (hci1394_set_next_xfer_buf(wvp,
690 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
691 			    cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
692 
693 				/* wvp->dma_bld_error is set by above call */
694 				continue;
695 			}
696 			break;
697 		}
698 
699 		case IXL1394_OP_SEND_PKT:
700 		case IXL1394_OP_SEND_PKT_U: {
701 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
702 
703 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
704 
705 			/* error if xfer_state not xfer pkt */
706 			if (wvp->xfer_state != XFER_PKT) {
707 				TNF_PROBE_1(hci1394_parse_ixl_misplacesnd_error,
708 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
709 				    errmsg, "IXL1394_EMISPLACED_SEND: SEND_PKT "
710 				    "without SEND_PKT_ST");
711 
712 				wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
713 				continue;
714 			}
715 
716 			/*
717 			 * save xfer start cmd ixl ptr in compiler_privatep
718 			 * field of this cmd
719 			 */
720 			ixlcurp->compiler_privatep = (void *)
721 			    wvp->ixl_cur_xfer_stp;
722 
723 			/*
724 			 * save pkt index [1-n] in compiler_resv field of this
725 			 * cmd
726 			 */
727 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
728 
729 			/*
730 			 * set buffer pointer & size into next xfer_bufp
731 			 * and xfer_size
732 			 */
733 			if (hci1394_set_next_xfer_buf(wvp,
734 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
735 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
736 
737 				/* wvp->dma_bld_error is set by above call */
738 				continue;
739 			}
740 
741 			/*
742 			 * set updateable xfer cache flush eval flag if
743 			 * updateable opcode
744 			 */
745 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
746 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
747 			}
748 			break;
749 		}
750 
751 		case IXL1394_OP_SEND_HDR_ONLY:
752 			/* set xfer_state for new descriptor block build */
753 			wvp->xfer_state = XMIT_HDRONLY;
754 
755 			/* set this ixl command as current xferstart command */
756 			wvp->ixl_cur_xfer_stp = ixlcurp;
757 			break;
758 
759 		case IXL1394_OP_SEND_NO_PKT:
760 			/* set xfer_state for new descriptor block build */
761 			wvp->xfer_state = XMIT_NOPKT;
762 
763 			/* set this ixl command as current xferstart command */
764 			wvp->ixl_cur_xfer_stp = ixlcurp;
765 			break;
766 
767 		case IXL1394_OP_JUMP:
768 		case IXL1394_OP_JUMP_U: {
769 			ixl1394_jump_t *cur_jump_ixlp;
770 
771 			cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
772 
773 			/*
774 			 * verify label indicated by IXL1394_OP_JUMP is
775 			 * actually an IXL1394_OP_LABEL or NULL
776 			 */
777 			if ((cur_jump_ixlp->label != NULL) &&
778 			    (cur_jump_ixlp->label->ixl_opcode !=
779 			    IXL1394_OP_LABEL)) {
780 				TNF_PROBE_3(hci1394_parse_ixl_jumplabel_error,
781 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
782 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
783 				    tnf_opaque, jumpixl_commandp, ixlcurp,
784 				    tnf_opaque, jumpto_ixl,
785 				    cur_jump_ixlp->label);
786 
787 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
788 				continue;
789 			}
790 			break;
791 		}
792 
793 		case IXL1394_OP_LABEL:
794 			/*
795 			 * save current ixl label command for xfer cmd
796 			 * finalize processing
797 			 */
798 			wvp->ixl_cur_labelp = ixlcurp;
799 
800 			/* set initiating label flag to cause cache flush */
801 			wvp->xfer_hci_flush |= INITIATING_LBL;
802 			break;
803 
804 		case IXL1394_OP_CALLBACK:
805 		case IXL1394_OP_CALLBACK_U:
806 		case IXL1394_OP_STORE_TIMESTAMP:
807 			/*
808 			 * these commands are accepted during compile,
809 			 * processed during execution (interrupt handling)
810 			 * processed during execution (interrupt handling).
811 			 */
812 			break;
813 
814 		case IXL1394_OP_SET_SKIPMODE:
815 		case IXL1394_OP_SET_SKIPMODE_U:
816 			/*
817 			 * Error if there is already a set skipmode cmd for
818 			 * this xfer
819 			 */
820 			if (wvp->ixl_setskipmode_cmdp != NULL) {
821 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
822 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
823 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
824 				    " duplicate set skipmode", tnf_opaque,
825 				    ixl_commandp, ixlcurp);
826 
827 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
828 				continue;
829 			}
830 
831 			/* save skip mode ixl command and verify skipmode */
832 			wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
833 			    ixlcurp;
834 
835 			if ((wvp->ixl_setskipmode_cmdp->skipmode !=
836 			    IXL1394_SKIP_TO_NEXT) &&
837 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
838 			    IXL1394_SKIP_TO_SELF) &&
839 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
840 			    IXL1394_SKIP_TO_STOP) &&
841 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
842 			    IXL1394_SKIP_TO_LABEL)) {
843 
844 				TNF_PROBE_3(hci1394_parse_ixl_dup_set_error,
845 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
846 				    errmsg, "IXL EBAD_SKIPMODE", tnf_opaque,
847 				    ixl_commandp, ixlcurp, tnf_int, skip,
848 				    wvp->ixl_setskipmode_cmdp->skipmode);
849 
850 				wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
851 				continue;
852 			}
853 
854 			/*
855 			 * if mode is IXL1394_SKIP_TO_LABEL, verify label
856 			 * references an IXL1394_OP_LABEL
857 			 */
858 			if ((wvp->ixl_setskipmode_cmdp->skipmode ==
859 			    IXL1394_SKIP_TO_LABEL) &&
860 			    ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
861 			    (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
862 			    IXL1394_OP_LABEL))) {
863 
864 				TNF_PROBE_3(hci1394_parse_ixl_jump_error,
865 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
866 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
867 				    tnf_opaque, jumpixl_commandp, ixlcurp,
868 				    tnf_opaque, jumpto_ixl,
869 				    wvp->ixl_setskipmode_cmdp->label);
870 
871 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
872 				continue;
873 			}
874 			/*
875 			 * set updateable set cmd cache flush eval flag if
876 			 * updateable opcode
877 			 */
878 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
879 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
880 			}
881 			break;
882 
883 		case IXL1394_OP_SET_TAGSYNC:
884 		case IXL1394_OP_SET_TAGSYNC_U:
885 			/*
886 			 * error if there is already a set tag and sync cmd
887 			 * for this xfer
888 			 */
889 			if (wvp->ixl_settagsync_cmdp != NULL) {
890 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
891 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
892 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
893 				    " duplicate set tagsync", tnf_opaque,
894 				    ixl_commandp, ixlcurp);
895 
896 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
897 				continue;
898 			}
899 
900 			/* save ixl command containing tag and sync values */
901 			wvp->ixl_settagsync_cmdp =
902 			    (ixl1394_set_tagsync_t *)ixlcurp;
903 
904 			/*
905 			 * set updateable set cmd cache flush eval flag if
906 			 * updateable opcode
907 			 */
908 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
909 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
910 			}
911 			break;
912 
913 		case IXL1394_OP_SET_SYNCWAIT:
914 			/*
915 			 * count ixl wait-for-sync commands since last
916 			 * finalize; ignore multiple occurrences for the same
917 			 * xfer command
918 			 */
919 			wvp->ixl_setsyncwait_cnt++;
920 			break;
921 
922 		default:
923 			/* error - unknown/unimplemented ixl command */
924 			TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
925 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
926 			    "IXL1394_BAD_IXL_OPCODE", tnf_opaque, ixl_commandp,
927 			    ixlcurp, tnf_opaque, ixl_opcode, ixlopcode);
928 
929 			wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
930 			continue;
931 		}
932 	} /* while */
933 
934 	/* finalize any last descriptor block build */
935 	wvp->ixl_cur_cmdp = NULL;
936 	if (wvp->dma_bld_error == 0) {
937 		hci1394_finalize_cur_xfer_desc(wvp);
938 	}
939 
940 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_exit,
941 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
942 }
943 
944 /*
945  * hci1394_finalize_all_xfer_desc()
946  *    Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
947  *
948  *    Set interrupt enable on first descriptor block associated with current
949  *    xfer IXL command if current IXL xfer was introduced by an IXL label cmnd.
950  *
951  *    Set interrupt enable on last descriptor block associated with current xfer
952  *    IXL command if any callback ixl commands are found on the execution path
953  *    between the current and the next xfer ixl command.  (Previously, this
954  *    applied to store timestamp ixl commands, as well.)
955  */
956 static void
957 hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
958 {
959 	ixl1394_command_t *ixlcurp;		/* current ixl command */
960 	ixl1394_command_t *ixlnextp;		/* next ixl command */
961 	ixl1394_command_t *ixlexecnext;
962 	hci1394_xfer_ctl_t	*xferctl_curp;
963 	hci1394_xfer_ctl_t	*xferctl_nxtp;
964 	hci1394_desc_t		*hcidescp;
965 	ddi_acc_handle_t	acc_hdl;
966 	uint32_t	temp;
967 	uint32_t	dma_execnext_addr;
968 	uint32_t	dma_skiplabel_addr;
969 	uint32_t	dma_skip_addr;
970 	uint32_t	callback_cnt;
971 	uint16_t	repcnt;
972 	uint16_t	ixlopcode;
973 	int		ii;
974 	int		err;
975 
976 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_enter,
977 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
978 
979 	/*
980 	 * If xmit mode and if default skipmode is skip to label -
981 	 * follow exec path starting at default skipmode label until
982 	 * find the first ixl xfer command which is to be executed.
983 	 * Set its address into default_skipxferp.
984 	 */
985 	if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
986 	    (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
987 
988 		err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
989 		    NULL, &wvp->default_skipxferp);
990 		if (err == DDI_FAILURE) {
991 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
992 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
993 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
994 			    "for skiplabel default w/no xfers", tnf_opaque,
995 			    skipixl_cmdp, wvp->default_skiplabelp);
996 			TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
997 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
998 
999 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1000 			return;
1001 		}
1002 	}
1003 
1004 	/* set first ixl cmd */
1005 	ixlnextp = wvp->ctxtp->ixl_firstp;
1006 
1007 	/* follow ixl links until reach end or find error */
1008 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
1009 
1010 		/* set this command as the current ixl command */
1011 		ixlcurp = ixlnextp;
1012 		ixlnextp = ixlcurp->next_ixlp;
1013 
1014 		/* get command opcode removing unneeded update flag */
1015 		ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
1016 
1017 		/*
1018 		 * Scan for next ixl xfer start command (including this one),
1019 		 * along ixl link path. Once xfer command found, find next IXL
1020 		 * xfer cmd along execution path and fill in branch address of
1021 		 * current xfer command. For a composite ixl xfer command, first
1022 		 * link forward branch dma addresses of each descriptor block in
1023 		 * composite, until reach final one then set its branch address
1024 		 * to next execution path xfer found.  Next determine skip mode
1025 		 * and fill in skip address(es) appropriately.
1026 		 */
1027 		/* skip to next if not xfer start ixl command */
1028 		if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
1029 		    ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
1030 			continue;
1031 		}
1032 
1033 		/*
1034 		 * get xfer_ctl structure and composite repeat count for current
1035 		 * IXL xfer cmd
1036 		 */
1037 		xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
1038 		repcnt = xferctl_curp->cnt;
1039 
1040 		/*
1041 		 * if initiated by an IXL label command, set interrupt enable
1042 		 * flag into last component of first descriptor block of
1043 		 * current IXL xfer cmd
1044 		 */
1045 		if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
1046 			hcidescp = (hci1394_desc_t *)
1047 			    xferctl_curp->dma[0].dma_descp;
1048 			acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
1049 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1050 			temp |= DESC_INTR_ENBL;
1051 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1052 		}
1053 
1054 		/* find next xfer IXL cmd by following execution path */
1055 		err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
1056 		    &callback_cnt, &ixlexecnext);
1057 
1058 		/* if label<->jump loop detected, return error */
1059 		if (err == DDI_FAILURE) {
1060 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1061 
1062 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
1063 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1064 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
1065 			    "w/no xfers", tnf_opaque, ixl_cmdp,
1066 			    ixlcurp->next_ixlp);
1067 			continue;
1068 		}
1069 
1070 		/* link current IXL's xfer_ctl to next xfer IXL on exec path */
1071 		xferctl_curp->execp = ixlexecnext;
1072 
1073 		/*
1074 		 * if callbacks have been seen during execution path scan,
1075 		 * set interrupt enable flag into last descriptor of last
1076 		 * descriptor block of current IXL xfer cmd
1077 		 */
1078 		if (callback_cnt != 0) {
1079 			hcidescp = (hci1394_desc_t *)
1080 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1081 			acc_hdl =
1082 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1083 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1084 			temp |= DESC_INTR_ENBL;
1085 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1086 		}
1087 
1088 		/*
1089 		 * obtain dma bound addr of next exec path IXL xfer command,
1090 		 * if any
1091 		 */
1092 		dma_execnext_addr = 0;
1093 
1094 		if (ixlexecnext != NULL) {
1095 			xferctl_nxtp = (hci1394_xfer_ctl_t *)
1096 			    ixlexecnext->compiler_privatep;
1097 			dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
1098 		} else {
1099 			/*
1100 			 * If this is last descriptor (next == NULL), then
1101 			 * make sure the interrupt bit is enabled.  This
1102 			 * way we can ensure that we are notified when the
1103 			 * descriptor chain processing has come to an end.
1104 			 */
1105 			hcidescp = (hci1394_desc_t *)
1106 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1107 			acc_hdl =
1108 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1109 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1110 			temp |= DESC_INTR_ENBL;
1111 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1112 		}
1113 
1114 		/*
1115 		 * set jump address of final cur IXL xfer cmd to addr next
1116 		 * IXL xfer cmd
1117 		 */
1118 		hcidescp = (hci1394_desc_t *)
1119 		    xferctl_curp->dma[repcnt - 1].dma_descp;
1120 		acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1121 		ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
1122 
1123 		/*
1124 		 * if a composite object, forward link initial jump
1125 		 * dma addresses
1126 		 */
1127 		for (ii = 0; ii < repcnt - 1; ii++) {
1128 			hcidescp = (hci1394_desc_t *)
1129 			    xferctl_curp->dma[ii].dma_descp;
1130 			acc_hdl	 = xferctl_curp->dma[ii].dma_buf->bi_handle;
1131 			ddi_put32(acc_hdl, &hcidescp->branch,
1132 			    xferctl_curp->dma[ii + 1].dma_bound);
1133 		}
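		/*
		 * e.g. with repcnt == 3: dma[0].branch -> dma[1].dma_bound,
		 * dma[1].branch -> dma[2].dma_bound, and dma[2]'s branch
		 * (set above) -> first block of the next exec path xfer.
		 */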
1134 
1135 		/*
1136 		 * fill in skip address(es) for all descriptor blocks belonging
1137 	 * to current IXL xfer command; note: skip addresses apply only
1138 		 * to xmit mode commands
1139 		 */
1140 		if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
1141 
1142 			/* first obtain and set skip mode information */
1143 			wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
1144 			hci1394_set_xmit_skip_mode(wvp);
1145 
1146 			/*
1147 			 * if skip to label, init dma bound addr to be
1148 			 * 1st xfer cmd after label
1149 			 */
1150 			dma_skiplabel_addr = 0;
1151 			if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
1152 			    (wvp->skipxferp != NULL)) {
1153 				xferctl_nxtp = (hci1394_xfer_ctl_t *)
1154 				    wvp->skipxferp->compiler_privatep;
1155 				dma_skiplabel_addr =
1156 				    xferctl_nxtp->dma[0].dma_bound;
1157 			}
1158 
1159 			/*
1160 			 * set skip addrs for each descriptor blk at this
1161 			 * xfer start IXL cmd
1162 			 */
1163 			for (ii = 0; ii < repcnt; ii++) {
1164 				switch (wvp->skipmode) {
1165 
1166 				case IXL1394_SKIP_TO_LABEL:
1167 					/* set dma bound address - label */
1168 					dma_skip_addr = dma_skiplabel_addr;
1169 					break;
1170 
1171 				case IXL1394_SKIP_TO_NEXT:
1172 					/* set dma bound address - next */
1173 					if (ii < repcnt - 1) {
1174 						dma_skip_addr = xferctl_curp->
1175 						    dma[ii + 1].dma_bound;
1176 					} else {
1177 						dma_skip_addr =
1178 						    dma_execnext_addr;
1179 					}
1180 					break;
1181 
1182 				case IXL1394_SKIP_TO_SELF:
1183 					/* set dma bound address - self */
1184 					dma_skip_addr =
1185 					    xferctl_curp->dma[ii].dma_bound;
1186 					break;
1187 
1188 				case IXL1394_SKIP_TO_STOP:
1189 				default:
1190 					/* set dma bound address - stop */
1191 					dma_skip_addr = 0;
1192 					break;
1193 				}
1194 
1195 				/*
1196 				 * determine address of first descriptor of
1197 				 * current descriptor block by adjusting addr of
1198 				 * last descriptor of current descriptor block
1199 				 */
1200 				hcidescp = ((hci1394_desc_t *)
1201 				    xferctl_curp->dma[ii].dma_descp);
1202 				acc_hdl =
1203 				    xferctl_curp->dma[ii].dma_buf->bi_handle;
1204 
1205 				/*
1206 				 * adjust by count of descriptors in this desc
1207 				 * block not including the last one (size of
1208 				 * descriptor)
1209 				 */
1210 				hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
1211 				    DESC_Z_MASK) - 1);
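				/*
				 * e.g. a Z count of 3 in dma_bound steps
				 * hcidescp back by 2 descriptors, from the
				 * last descriptor in the block to the first.
				 */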
1212 
1213 				/*
1214 				 * adjust further if the last descriptor is
1215 				 * double sized
1216 				 */
1217 				if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
1218 					hcidescp++;
1219 				}
1220 				/*
1221 				 * now set skip address into first descriptor
1222 				 * of descriptor block
1223 				 */
1224 				ddi_put32(acc_hdl, &hcidescp->branch,
1225 				    dma_skip_addr);
1226 			} /* for */
1227 		} /* if */
1228 	} /* while */
1229 
1230 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
1231 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1232 }
1233 
1234 /*
1235  * hci1394_finalize_cur_xfer_desc()
1236  *    Build the openHCI descriptor for a packet or buffer based on info
1237  *    currently collected into the working vars struct (wvp).  After some
1238  *    checks, this routine dispatches to the appropriate descriptor block
1239  *    build (bld) routine for the packet or buf type.
1240  */
1241 static void
1242 hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
1243 {
1244 	uint16_t ixlopcode;
1245 	uint16_t ixlopraw;
1246 
1247 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_enter,
1248 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1249 
1250 	/* extract opcode from current IXL cmd (if any) */
1251 	if (wvp->ixl_cur_cmdp != NULL) {
1252 		ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
1253 		ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
1254 	} else {
1255 		ixlopcode = ixlopraw = IXL1394_OP_INVALID;
1256 	}
1257 
1258 	/*
1259 	 * if no xfer descriptor block being built, perform validity checks
1260 	 */
1261 	if (wvp->xfer_state == XFER_NONE) {
1262 		/*
1263 		 * error if being finalized by IXL1394_OP_LABEL or
1264 		 * IXL1394_OP_JUMP or if at end, and have an unapplied
1265 		 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
1266 		 * IXL1394_OP_SET_SYNCWAIT
1267 		 */
1268 		if ((ixlopraw == IXL1394_OP_JUMP) ||
1269 		    (ixlopraw == IXL1394_OP_LABEL) ||
1270 		    (wvp->ixl_cur_cmdp == NULL) ||
1271 		    (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
1272 			if ((wvp->ixl_settagsync_cmdp != NULL) ||
1273 			    (wvp->ixl_setskipmode_cmdp != NULL) ||
1274 			    (wvp->ixl_setsyncwait_cnt != 0)) {
1275 
1276 				wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
1277 
1278 				TNF_PROBE_2(
1279 				    hci1394_finalize_cur_xfer_desc_set_error,
1280 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
1281 				    errmsg, "IXL1394_UNAPPLIED_SET_CMD: "
1282 				    "orphaned set (no associated packet)",
1283 				    tnf_opaque, ixl_commandp,
1284 				    wvp->ixl_cur_cmdp);
1285 				TNF_PROBE_0_DEBUG(
1286 				    hci1394_finalize_cur_xfer_desc_exit,
1287 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1288 				return;
1289 			}
1290 		}
1291 
1292 		/* error if finalize is due to updateable jump cmd */
1293 		if (ixlopcode == IXL1394_OP_JUMP_U) {
1294 
1295 			wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
1296 
1297 			TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_upd_error,
1298 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1299 			    "IXL1394_EUPDATE_DISALLOWED: jumpU w/out pkt",
1300 			    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
1301 			TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1302 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1303 			return;
1304 		}
1305 
1306 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1307 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1308 
1309 		/* no error, no xfer */
1310 		return;
1311 	}
1312 
1313 	/*
1314 	 * finalize current xfer descriptor block being built
1315 	 */
1316 
1317 	/* count IXL xfer start command for descriptor block being built */
1318 	wvp->ixl_xfer_st_cnt++;
1319 
1320 	/*
1321 	 * complete setting of cache flush evaluation flags; flags will already
1322 	 * have been set by updateable set cmds and non-start xfer pkt cmds
1323 	 */
1324 	/* now set cache flush flag if current xfer start cmnd is updateable */
1325 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
1326 		wvp->xfer_hci_flush |= UPDATEABLE_XFER;
1327 	}
1328 	/*
1329 	 * also set cache flush flag if xfer being finalized by
1330 	 * updateable jump cmd
1331 	 */
1332 	if (ixlopcode == IXL1394_OP_JUMP_U) {
1333 		wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
1334 	}
1335 
1336 	/*
1337 	 * Determine if cache flush required before building next descriptor
1338 	 * block. If xfer pkt command and any cache flush flags are set,
1339 	 * hci flush needed.
1340 	 * If buffer or special xfer command and xfer command is updateable or
1341 	 * an associated set command is updateable, hci flush is required now.
1342 	 * If a single-xfer buffer or special xfer command is finalized by
1343 	 * updateable jump command, hci flush is required now.
1344 	 * Note: a cache flush will be required later, before the last
1345 	 * descriptor block of a multi-xfer set of descriptor blocks is built,
1346 	 * if this (non-pkt) xfer is finalized by an updateable jump command.
1347 	 */
1348 	if (wvp->xfer_hci_flush != 0) {
1349 		if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
1350 		    IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1351 		    (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1352 		    0)) {
1353 
1354 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
1355 				TNF_PROBE_0_DEBUG(
1356 				    hci1394_finalize_cur_xfer_desc_exit,
1357 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1358 
1359 				/* wvp->dma_bld_error is set by above call */
1360 				return;
1361 			}
1362 		}
1363 	}
1364 
1365 	/*
1366 	 * determine which kind of descriptor block to build based on
1367 	 * xfer state - hdr only, skip cycle, pkt or buf.
1368 	 */
1369 	switch (wvp->xfer_state) {
1370 
1371 	case XFER_PKT:
1372 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1373 			hci1394_bld_recv_pkt_desc(wvp);
1374 		} else {
1375 			hci1394_bld_xmit_pkt_desc(wvp);
1376 		}
1377 		break;
1378 
1379 	case XFER_BUF:
1380 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1381 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
1382 				hci1394_bld_recv_buf_fill_desc(wvp);
1383 			} else {
1384 				hci1394_bld_recv_buf_ppb_desc(wvp);
1385 			}
1386 		} else {
1387 			hci1394_bld_xmit_buf_desc(wvp);
1388 		}
1389 		break;
1390 
1391 	case XMIT_HDRONLY:
1392 	case XMIT_NOPKT:
1393 		hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
1394 		break;
1395 
1396 	default:
1397 		/* internal compiler error */
1398 		TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_internal_error,
1399 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1400 		    "IXL1394_INTERNAL_ERROR: invalid state", tnf_opaque,
1401 		    ixl_commandp, wvp->ixl_cur_cmdp);
1402 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1403 	}
1404 
1405 	/* return if error */
1406 	if (wvp->dma_bld_error != 0) {
1407 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1408 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1409 
1410 		/* wvp->dma_bld_error is set by above call */
1411 		return;
1412 	}
1413 
1414 	/*
1415 	 * if was finalizing IXL jump cmd, set compiler_privatep to
1416 	 * cur xfer IXL cmd
1417 	 */
1418 	if (ixlopraw == IXL1394_OP_JUMP) {
1419 		wvp->ixl_cur_cmdp->compiler_privatep =
1420 		    (void *)wvp->ixl_cur_xfer_stp;
1421 	}
1422 
1423 	/* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
1424 	if (wvp->ixl_cur_labelp != NULL) {
1425 		((hci1394_xfer_ctl_t *)
1426 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
1427 		    XCTL_LABELLED;
1428 		wvp->ixl_cur_labelp = NULL;
1429 	}
1430 
1431 	/*
1432 	 * set any associated IXL set skipmode cmd into xfer_ctl of
1433 	 * cur xfer IXL cmd
1434 	 */
1435 	if (wvp->ixl_setskipmode_cmdp != NULL) {
1436 		((hci1394_xfer_ctl_t *)
1437 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
1438 		    wvp->ixl_setskipmode_cmdp;
1439 	}
1440 
1441 	/* set no current xfer start cmd */
1442 	wvp->ixl_cur_xfer_stp = NULL;
1443 
1444 	/* set no current set tag&sync, set skipmode or set syncwait commands */
1445 	wvp->ixl_settagsync_cmdp = NULL;
1446 	wvp->ixl_setskipmode_cmdp = NULL;
1447 	wvp->ixl_setsyncwait_cnt = 0;
1448 
1449 	/* set no currently active descriptor blocks */
1450 	wvp->descriptors = 0;
1451 
1452 	/* reset total packet length and buffers count */
1453 	wvp->xfer_pktlen = 0;
1454 	wvp->xfer_bufcnt = 0;
1455 
1456 	/* reset flush cache evaluation flags */
1457 	wvp->xfer_hci_flush = 0;
1458 
1459 	/* set no xmit descriptor block being built */
1460 	wvp->xfer_state = XFER_NONE;
1461 
1462 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1463 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1464 }
1465 
1466 /*
1467  * hci1394_bld_recv_pkt_desc()
1468  *    Used to create the openHCI dma descriptor block(s) for a receive packet.
1469  */
1470 static void
1471 hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1472 {
1473 	hci1394_xfer_ctl_t	*xctlp;
1474 	caddr_t			dma_descp;
1475 	uint32_t		dma_desc_bound;
1476 	uint32_t		wait_for_sync;
1477 	uint32_t		ii;
1478 	hci1394_desc_t		*wv_descp;	/* shorthand to local descrpt */
1479 
1480 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_enter,
1481 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1482 
1483 	/*
1484 	 * error if the number of descriptors to be built exceeds the
1485 	 * maximum allowed in a descriptor block.
1486 	 */
1487 	if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1488 
1489 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1490 
1491 		TNF_PROBE_3(hci1394_bld_recv_pkt_desc_fragment_oflo_error,
1492 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1493 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1494 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1495 		    wvp->descriptors + wvp->xfer_bufcnt);
1496 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1497 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1498 		return;
1499 	}
1500 
1501 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1502 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1503 
1504 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1505 
1506 		TNF_PROBE_2(hci1394_bld_recv_pkt_desc_mem_alloc_fail,
1507 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1508 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1509 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1510 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1511 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1512 		return;
1513 	}
1514 
1515 	/*
1516 	 * save xfer_ctl struct addr in compiler_privatep of
1517 	 * current IXL xfer cmd
1518 	 */
1519 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1520 
1521 	/*
1522 	 * if enabled, set wait for sync flag in first descriptor of
1523 	 * descriptor block
1524 	 */
1525 	if (wvp->ixl_setsyncwait_cnt > 0) {
1526 		wvp->ixl_setsyncwait_cnt = 1;
1527 		wait_for_sync = DESC_W_ENBL;
1528 	} else {
1529 		wait_for_sync = DESC_W_DSABL;
1530 	}
1531 
1532 	/* create descriptor block for this recv packet (xfer status enabled) */
1533 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1534 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1535 
1536 		if (ii == (wvp->xfer_bufcnt - 1)) {
1537 			HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
1538 			    DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
1539 		} else {
1540 			HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
1541 			    wvp->xfer_size[ii]);
1542 		}
1543 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1544 		wv_descp->branch = 0;
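		/*
		 * resCount starts as the full buffer size; the HC writes
		 * back the count of bytes it did not fill, so a fully
		 * received packet leaves resCount == 0.
		 */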
1545 		wv_descp->status = (wvp->xfer_size[ii] <<
1546 		    DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
1547 		wvp->descriptors++;
1548 	}
1549 
1550 	/* allocate and copy descriptor block to dma memory */
1551 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1552 	    DDI_SUCCESS) {
1553 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1554 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1555 
1556 		/* wvp->dma_bld_error is set by above function call */
1557 		return;
1558 	}
1559 
1560 	/*
1561 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1562 	 * is last component)
1563 	 */
1564 	xctlp->dma[0].dma_bound = dma_desc_bound;
1565 	xctlp->dma[0].dma_descp =
1566 	    dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
1567 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1568 
1569 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1570 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1571 }
1572 
1573 /*
1574  * hci1394_bld_recv_buf_ppb_desc()
1575  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1576  *    in packet per buffer mode.
1577  */
1578 static void
1579 hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
1580 {
1581 	hci1394_xfer_ctl_t	*xctlp;
1582 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1583 	caddr_t		dma_descp;
1584 	uint32_t	dma_desc_bound;
1585 	uint32_t	pktsize;
1586 	uint32_t	pktcnt;
1587 	uint32_t	wait_for_sync;
1588 	uint32_t	ii;
1589 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1590 
1591 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_enter,
1592 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1593 
1594 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1595 
1596 	/* determine number and size of pkt desc blocks to create */
1597 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1598 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1599 
1600 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1601 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1602 
1603 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1604 
1605 		TNF_PROBE_2(hci1394_bld_recv_buf_ppb_desc_mem_alloc_fail,
1606 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1607 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1608 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1609 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1610 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1611 		return;
1612 	}
1613 
1614 	/*
1615 	 * save xfer_ctl struct addr in compiler_privatep of
1616 	 * current IXL xfer cmd
1617 	 */
1618 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1619 
1620 	/*
1621 	 * if enabled, set wait for sync flag in first descriptor in
1622 	 * descriptor block
1623 	 */
1624 	if (wvp->ixl_setsyncwait_cnt > 0) {
1625 		wvp->ixl_setsyncwait_cnt = 1;
1626 		wait_for_sync = DESC_W_ENBL;
1627 	} else {
1628 		wait_for_sync = DESC_W_DSABL;
1629 	}
1630 
1631 	/* create first descriptor block for this recv packet */
1632 	/* consists of one descriptor and xfer status is enabled */
1633 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
1634 	HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1635 	    wait_for_sync, pktsize);
1636 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1637 	wv_descp->branch = 0;
1638 	wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
1639 	    DESC_ST_RESCOUNT_MASK;
1640 	wvp->descriptors++;
1641 
1642 	/* useful debug trace info - IXL command, and packet count and size */
1643 	TNF_PROBE_3_DEBUG(hci1394_bld_recv_buf_ppb_desc_recv_buf_info,
1644 	    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ixl_commandp,
1645 	    wvp->ixl_cur_xfer_stp, tnf_int, pkt_count, pktcnt, tnf_int,
1646 	    pkt_size, pktsize);
1647 
1648 	/*
1649 	 * generate as many contiguous descriptor blocks as there are
1650 	 * recv pkts
1651 	 */
1652 	for (ii = 0; ii < pktcnt; ii++) {
1653 
1654 		/* if about to create last descriptor block */
1655 		if (ii == (pktcnt - 1)) {
1656 			/* check and perform any required hci cache flush */
1657 			if (hci1394_flush_end_desc_check(wvp, ii) !=
1658 			    DDI_SUCCESS) {
1659 				TNF_PROBE_1_DEBUG(
1660 				    hci1394_bld_recv_buf_ppb_desc_fl_error,
1661 				    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int,
1662 				    for_ii, ii);
1663 				TNF_PROBE_0_DEBUG(
1664 				    hci1394_bld_recv_buf_ppb_desc_exit,
1665 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1666 
1667 				/* wvp->dma_bld_error is set by above call */
1668 				return;
1669 			}
1670 		}
1671 
1672 		/* allocate and copy descriptor block to dma memory */
1673 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1674 		    &dma_desc_bound) != DDI_SUCCESS) {
1675 
1676 			TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1677 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1678 
1679 			/* wvp->dma_bld_error is set by above call */
1680 			return;
1681 		}
1682 
1683 		/*
1684 		 * set dma addrs into xfer_ctl struct (unbound addr (kernel
1685 		 * virtual) is last component (descriptor))
1686 		 */
1687 		xctlp->dma[ii].dma_bound = dma_desc_bound;
1688 		xctlp->dma[ii].dma_descp = dma_descp;
1689 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
1690 
1691 		/* advance buffer ptr by pktsize in descriptor block */
1692 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1693 		    pktsize;
1694 	}
1695 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1696 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1697 }
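
/*
 * Illustration (not part of the driver): the packet-per-buffer loop above
 * reuses a single local descriptor and advances its data_addr by pktsize
 * after each copy to dma memory, so the ii-th cloned block receives
 * bufp + ii * pktsize.  A minimal user-mode sketch of that address walk;
 * the example_ names and the HCI1394_IXL_COMP_EXAMPLES guard are
 * hypothetical.
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>
#include <stdio.h>

static void
example_ppb_addr_walk(uint32_t bufp, uint32_t size, uint32_t pktsize)
{
	uint32_t pktcnt = size / pktsize;	/* trailing partial pkt unused */
	uint32_t ii;

	for (ii = 0; ii < pktcnt; ii++) {
		/* this is the data_addr the ii-th cloned block ends up with */
		(void) printf("pkt %u: data_addr=0x%x len=%u\n",
		    ii, bufp + ii * pktsize, pktsize);
	}
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */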
1698 
1699 /*
1700  * hci1394_bld_recv_buf_fill_desc()
1701  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1702  *    in buffer fill mode.
1703  */
1704 static void
1705 hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
1706 {
1707 	hci1394_xfer_ctl_t	*xctlp;
1708 	caddr_t			dma_descp;
1709 	uint32_t		dma_desc_bound;
1710 	uint32_t		wait_for_sync;
1711 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1712 
1713 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_enter,
1714 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1715 
1716 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1717 
1718 
1719 	/* allocate an xfer_ctl struct including 1 xfer_ctl_dma struct */
1720 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1721 
1722 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1723 
1724 		TNF_PROBE_2(hci1394_bld_recv_buf_fill_desc_mem_alloc_fail,
1725 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1726 		    "IXL1394_EMEM_ALLOC_FAIL: xfer_ctl", tnf_opaque,
1727 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1728 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1729 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1730 		return;
1731 	}
1732 
1733 	/*
1734 	 * save xfer_ctl struct addr in compiler_privatep of
1735 	 * current IXL xfer cmd
1736 	 */
1737 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1738 
1739 	/*
1740 	 * if enabled, set wait for sync flag in first descriptor of
1741 	 * descriptor block
1742 	 */
1743 	if (wvp->ixl_setsyncwait_cnt > 0) {
1744 		wvp->ixl_setsyncwait_cnt = 1;
1745 		wait_for_sync = DESC_W_ENBL;
1746 	} else {
1747 		wait_for_sync = DESC_W_DSABL;
1748 	}
1749 
1750 	/*
1751 	 * create descriptor block for this buffer fill mode recv command which
1752 	 * consists of one descriptor with xfer status enabled
1753 	 */
1754 	HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
1755 	    DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
1756 
1757 	wvp->descriptor_block[wvp->descriptors].data_addr =
1758 	    local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1759 	wvp->descriptor_block[wvp->descriptors].branch = 0;
1760 	wvp->descriptor_block[wvp->descriptors].status =
1761 	    (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
1762 	    DESC_ST_RESCOUNT_MASK;
1763 	wvp->descriptors++;
1764 
1765 	/* check and perform any required hci cache flush */
1766 	if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
1767 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1768 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1769 
1770 		/* wvp->dma_bld_error is set by above call */
1771 		return;
1772 	}
1773 
1774 	/* allocate and copy descriptor block to dma memory */
1775 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
1776 	    != DDI_SUCCESS) {
1777 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1778 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1779 
1780 		/* wvp->dma_bld_error is set by above call */
1781 		return;
1782 	}
1783 
1784 	/*
1785 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1786 	 * is last component).
1787 	 */
1788 	xctlp->dma[0].dma_bound = dma_desc_bound;
1789 	xctlp->dma[0].dma_descp = dma_descp;
1790 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1791 
1792 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1793 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1794 }
1795 
1796 /*
1797  * hci1394_bld_xmit_pkt_desc()
1798  *    Used to create the openHCI dma descriptor block(s) for a transmit packet.
1799  */
1800 static void
1801 hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1802 {
1803 	hci1394_xfer_ctl_t *xctlp;
1804 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1805 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1806 	caddr_t		dma_descp;	/* dma bound memory for descriptor */
1807 	uint32_t	dma_desc_bound;
1808 	uint32_t	ii;
1809 
1810 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_enter,
1811 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1812 
1813 	/*
1814 	 * it is an error if the number of descriptors to be built exceeds the
1815 	 * maximum allowed in a descriptor block.  Add 2 for the overhead of
1816 	 * the OMORE-Immediate, which occupies two descriptor slots.
1817 	 */
1818 	if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1819 
1820 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1821 
1822 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_fragment_oflo_error,
1823 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1824 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1825 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1826 		    wvp->descriptors + 2 + wvp->xfer_bufcnt);
1827 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1828 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1829 		return;
1830 	}
1831 
1832 	/* it is an error if the total packet length exceeds 0xFFFF */
1833 	if (wvp->xfer_pktlen > 0xFFFF) {
1834 
1835 		wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
1836 
1837 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_packet_oflo_error,
1838 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1839 		    "IXL1394_EPKTSIZE_MAX_OFLO", tnf_opaque, ixl_commandp,
1840 		    wvp->ixl_cur_xfer_stp, tnf_int, total_pktlen,
1841 		    wvp->xfer_pktlen);
1842 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1843 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1844 		return;
1845 	}
1846 
1847 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1848 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1849 
1850 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1851 
1852 		TNF_PROBE_2(hci1394_bld_xmit_pkt_desc_mem_alloc_fail,
1853 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1854 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1855 		    ixl_commandp, wvp->ixl_cur_cmdp);
1856 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1857 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1858 		return;
1859 	}
1860 
1861 	/*
1862 	 * save xfer_ctl struct addr in compiler_privatep of
1863 	 * current IXL xfer cmd
1864 	 */
1865 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1866 
1867 	/* generate values for the xmit pkt hdrs */
1868 	hci1394_set_xmit_pkt_hdr(wvp);
1869 
1870 	/*
1871 	 * xmit pkt starts with an output more immediate,
1872 	 * a double sized hci1394_desc
1873 	 */
1874 	wv_omi_descp = (hci1394_output_more_imm_t *)
1875 	    (&wvp->descriptor_block[wvp->descriptors]);
1876 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1877 
1878 	wv_omi_descp->data_addr = 0;
1879 	wv_omi_descp->branch = 0;
1880 	wv_omi_descp->status = 0;
1881 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1882 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1883 	wv_omi_descp->q3 = 0;
1884 	wv_omi_descp->q4 = 0;
1885 
1886 	wvp->descriptors += 2;
1887 
1888 	/*
1889 	 * create the required output more hci1394_desc descriptor, then create
1890 	 * an output last hci1394_desc descriptor with xfer status enabled
1891 	 */
1892 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1893 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1894 
1895 		if (ii == (wvp->xfer_bufcnt - 1)) {
1896 			HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
1897 			    DESC_INTR_DSABL, wvp->xfer_size[ii]);
1898 		} else {
1899 			HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
1900 		}
1901 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1902 		wv_descp->branch = 0;
1903 		wv_descp->status = 0;
1904 		wvp->descriptors++;
1905 	}
1906 
1907 	/* allocate and copy descriptor block to dma memory */
1908 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1909 	    DDI_SUCCESS) {
1910 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1911 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1912 
1913 		/* wvp->dma_bld_error is set by above call */
1914 		return;
1915 	}
1916 
1917 	/*
1918 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1919 	 * is last component (descriptor))
1920 	 */
1921 	xctlp->dma[0].dma_bound = dma_desc_bound;
1922 	xctlp->dma[0].dma_descp =
1923 	    dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
1924 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1925 
1926 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1927 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1928 }
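
/*
 * Illustration (not part of the driver): the OUTPUT_MORE-Immediate built
 * above is double sized, occupying descriptor slots 0 and 1, and the
 * buffer descriptors occupy slots 2 through xfer_bufcnt + 1.  The
 * xfer_ctl code therefore points dma_descp at byte offset
 * (xfer_bufcnt + 1) * sizeof (hci1394_desc_t), i.e. at the OUTPUT_LAST.
 * A hypothetical sketch of that arithmetic:
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

static uint32_t
example_olast_byte_offset(uint32_t xfer_bufcnt, uint32_t descsize)
{
	/* 2 immediate slots + (xfer_bufcnt - 1) OUTPUT_MOREs precede it */
	return ((xfer_bufcnt + 1) * descsize);
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */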
1929 
1930 /*
1931  * hci1394_bld_xmit_buf_desc()
1932  *    Used to create the openHCI dma descriptor blocks for a transmit buffer.
1933  */
1934 static void
1935 hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
1936 {
1937 	hci1394_xfer_ctl_t	*xctlp;
1938 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1939 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1940 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1941 	caddr_t		dma_descp;
1942 	uint32_t	dma_desc_bound;
1943 	uint32_t	pktsize;
1944 	uint32_t	pktcnt;
1945 	uint32_t	ii;
1946 
1947 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_enter,
1948 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1949 
1950 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1951 
1952 	/* determine number and size of pkt desc blocks to create */
1953 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1954 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1955 
1956 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1957 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1958 
1959 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1960 
1961 		TNF_PROBE_2(hci1394_bld_xmit_buf_desc_mem_alloc_fail,
1962 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1963 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1964 		    ixl_commandp, wvp->ixl_cur_cmdp);
1965 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
1966 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1967 		return;
1968 	}
1969 
1970 	/*
1971 	 * save xfer_ctl struct addr in compiler_privatep of
1972 	 * current IXL xfer cmd
1973 	 */
1974 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1975 
1976 	/* generate values for the xmit pkt hdrs */
1977 	wvp->xfer_pktlen = pktsize;
1978 	hci1394_set_xmit_pkt_hdr(wvp);
1979 
1980 	/*
1981 	 * xmit pkt starts with an output more immediate,
1982 	 * a double sized hci1394_desc
1983 	 */
1984 	wv_omi_descp = (hci1394_output_more_imm_t *)
1985 	    &wvp->descriptor_block[wvp->descriptors];
1986 
1987 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1988 
1989 	wv_omi_descp->data_addr = 0;
1990 	wv_omi_descp->branch = 0;
1991 	wv_omi_descp->status = 0;
1992 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1993 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1994 	wv_omi_descp->q3 = 0;
1995 	wv_omi_descp->q4 = 0;
1996 
1997 	wvp->descriptors += 2;
1998 
1999 	/* follow with a single output last descriptor w/status enabled */
2000 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
2001 	HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
2002 	    pktsize);
2003 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
2004 	wv_descp->branch = 0;
2005 	wv_descp->status = 0;
2006 	wvp->descriptors++;
2007 
2008 	/*
2009 	 * generate as many contiguous descriptor blocks as there are
2010 	 * xmit packets
2011 	 */
2012 	for (ii = 0; ii < pktcnt; ii++) {
2013 
2014 		/* if about to create last descriptor block */
2015 		if (ii == (pktcnt - 1)) {
2016 			/* check and perform any required hci cache flush */
2017 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2018 			    DDI_SUCCESS) {
2019 				TNF_PROBE_0_DEBUG(
2020 				    hci1394_bld_xmit_buf_desc_exit,
2021 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2022 
2023 				/* wvp->dma_bld_error is set by above call */
2024 				return;
2025 			}
2026 		}
2027 
2028 		/* allocate and copy descriptor block to dma memory */
2029 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2030 		    &dma_desc_bound) != DDI_SUCCESS) {
2031 			TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2032 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2033 
2034 			/* wvp->dma_bld_error is set by above call */
2035 			return;
2036 		}
2037 
2038 		/*
2039 		 * set dma addrs into xfer_ctl structure (unbound addr
2040 		 * (kernel virtual) is last component (descriptor))
2041 		 */
2042 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2043 		xctlp->dma[ii].dma_descp = dma_descp + 2 *
2044 		    sizeof (hci1394_desc_t);
2045 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2046 
2047 		/* advance buffer ptr by pktsize in descriptor block */
2048 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
2049 		    pktsize;
2050 	}
2051 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2052 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2053 }
2054 
2055 /*
2056  * hci1394_bld_xmit_hdronly_nopkt_desc()
2057  *    Used to create the openHCI dma descriptor blocks for transmitting
2058  *    a packet consisting of an isochronous header with no data payload,
2059  *    or for not sending a packet at all for a cycle.
2060  *
2061  *    A Store_Value openhci descriptor is built at the start of each
2062  *    IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
2063  *    descriptor block (to allow for skip cycle specification and set skipmode
2064  *    processing for these commands).
2065  */
2066 static void
2067 hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
2068 {
2069 	hci1394_xfer_ctl_t	*xctlp;
2070 	hci1394_output_last_t	*wv_ol_descp; /* shorthand to local descrp */
2071 	hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
2072 	caddr_t		dma_descp;
2073 	uint32_t	dma_desc_bound;
2074 	uint32_t	repcnt;
2075 	uint32_t	ii;
2076 
2077 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_enter,
2078 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2079 
2080 	/* determine # of instances of output hdronly/nopkt to generate */
2081 	repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
2082 
2083 	/*
2084 	 * allocate an xfer_ctl structure which includes repcnt
2085 	 * xfer_ctl_dma structs
2086 	 */
2087 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
2088 
2089 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2090 
2091 		TNF_PROBE_2(hci1394_bld_xmit_hdronly_nopkt_desc_mem_alloc_fail,
2092 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2093 		    "IXL EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
2094 		    ixl_commandp, wvp->ixl_cur_cmdp);
2095 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2096 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2097 		return;
2098 	}
2099 
2100 	/*
2101 	 * save xfer_ctl struct addr in compiler_privatep of
2102 	 * current IXL xfer command
2103 	 */
2104 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
2105 
2106 	/*
2107 	 * create a storevalue descriptor
2108 	 * (will be used for skip vs jump processing)
2109 	 */
2110 	hci1394_set_xmit_storevalue_desc(wvp);
2111 
2112 	/*
2113 	 * processing now based on opcode:
2114 	 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
2115 	 */
2116 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
2117 	    IXL1394_OP_SEND_HDR_ONLY) {
2118 
2119 		/* for header only, generate values for the xmit pkt hdrs */
2120 		hci1394_set_xmit_pkt_hdr(wvp);
2121 
2122 		/*
2123 		 * create an output last immediate (double sized) descriptor
2124 		 * xfer status enabled
2125 		 */
2126 		wv_oli_descp = (hci1394_output_last_imm_t *)
2127 		    &wvp->descriptor_block[wvp->descriptors];
2128 
2129 		HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
2130 		    DESC_INTR_DSABL);
2131 
2132 		wv_oli_descp->data_addr = 0;
2133 		wv_oli_descp->branch = 0;
2134 		wv_oli_descp->status = 0;
2135 		wv_oli_descp->q1 = wvp->xmit_pkthdr1;
2136 		wv_oli_descp->q2 = wvp->xmit_pkthdr2;
2137 		wv_oli_descp->q3 = 0;
2138 		wv_oli_descp->q4 = 0;
2139 		wvp->descriptors += 2;
2140 	} else {
2141 		/*
2142 		 * for skip cycle, create a single output last descriptor
2143 		 * with xfer status enabled
2144 		 */
2145 		wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
2146 		HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
2147 		    DESC_INTR_DSABL, 0);
2148 		wv_ol_descp->data_addr = 0;
2149 		wv_ol_descp->branch = 0;
2150 		wv_ol_descp->status = 0;
2151 		wvp->descriptors++;
2152 	}
2153 
2154 	/*
2155 	 * generate as many contiguous descriptor blocks as repeat count
2156 	 * indicates
2157 	 */
2158 	for (ii = 0; ii < repcnt; ii++) {
2159 
2160 		/* if about to create last descriptor block */
2161 		if (ii == (repcnt - 1)) {
2162 			/* check and perform any required hci cache flush */
2163 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2164 			    DDI_SUCCESS) {
2165 				TNF_PROBE_0_DEBUG(
2166 				    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2167 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2168 
2169 				/* wvp->dma_bld_error is set by above call */
2170 				return;
2171 			}
2172 		}
2173 
2174 		/* allocate and copy descriptor block to dma memory */
2175 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2176 		    &dma_desc_bound) != DDI_SUCCESS) {
2177 			TNF_PROBE_0_DEBUG(
2178 			    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2179 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2180 
2181 			/* wvp->dma_bld_error is set by above call */
2182 			return;
2183 		}
2184 
2185 		/*
2186 		 * set dma addrs into xfer_ctl structure (unbound addr
2187 		 * (kernel virtual) is last component (descriptor))
2188 		 */
2189 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2190 		xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
2191 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2192 	}
2193 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2194 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2195 }
2196 
2197 /*
2198  * hci1394_bld_dma_mem_desc_blk()
2199  *    Used to put a given OpenHCI descriptor block into dma bound memory.
2200  */
2201 static int
2202 hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
2203     uint32_t *dma_desc_bound)
2204 {
2205 	uint32_t	dma_bound;
2206 
2207 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_enter,
2208 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2209 
2210 	/* set internal error if no descriptor blocks to build */
2211 	if (wvp->descriptors == 0) {
2212 
2213 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
2214 
2215 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_error,
2216 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2217 		    "IXL1394_INTERNAL_ERROR: no descriptors to build");
2218 		TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2219 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2220 		return (DDI_FAILURE);
2221 	}
2222 
2223 	/* allocate dma memory and move this descriptor block to it */
2224 	*dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
2225 	    sizeof (hci1394_desc_t), &dma_bound);
2226 
2227 	if (*dma_descpp == NULL) {
2228 
2229 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2230 
2231 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_fail,
2232 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2233 		    "IXL1394_EMEM_ALLOC_FAIL: for descriptors");
2234 		TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2235 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2236 		return (DDI_FAILURE);
2237 	}
2238 #ifdef _KERNEL
2239 	ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
2240 	    (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
2241 	    wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
2242 	    DDI_DEV_AUTOINCR);
2243 #else
2244 	bcopy(wvp->descriptor_block, *dma_descpp,
2245 	    wvp->descriptors * sizeof (hci1394_desc_t));
2246 #endif
2247 	/*
2248 	 * convert allocated block's memory address to bus address space
2249 	 * include properly set Z bits (descriptor count).
2250 	 */
2251 	*dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
2252 
2253 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2254 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2255 
2256 	return (DDI_SUCCESS);
2257 }
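
/*
 * Illustration (not part of the driver): an OpenHCI command pointer is
 * 16-byte aligned, which frees its low four bits to carry Z, the number
 * of descriptors in the block, as done with DESC_Z_MASK above.  A
 * standalone sketch assuming a 4-bit Z field; EXAMPLE_Z_MASK and the
 * example_ names are hypothetical:
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

#define	EXAMPLE_Z_MASK	0xFU

static uint32_t
example_bound_encode(uint32_t busaddr, uint32_t zcnt)
{
	return ((busaddr & ~EXAMPLE_Z_MASK) | (zcnt & EXAMPLE_Z_MASK));
}

static uint32_t
example_bound_zcnt(uint32_t bound)
{
	return (bound & EXAMPLE_Z_MASK);	/* recover descriptor count */
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */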
2258 
2259 /*
2260  * hci1394_set_xmit_pkt_hdr()
2261  *    Compose the 2 quadlets for the xmit packet header.
2262  */
2263 static void
2264 hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
2265 {
2266 	uint16_t tag;
2267 	uint16_t sync;
2268 
2269 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_enter,
2270 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2271 
2272 	/*
2273 	 * choose tag and sync bits for header either from default values or
2274 	 * from currently active set tag and sync IXL command
2275 	 * (clear command after use)
2276 	 */
2277 	if (wvp->ixl_settagsync_cmdp == NULL) {
2278 		tag = wvp->default_tag;
2279 		sync = wvp->default_sync;
2280 	} else {
2281 		tag = wvp->ixl_settagsync_cmdp->tag;
2282 		sync = wvp->ixl_settagsync_cmdp->sync;
2283 		wvp->ixl_settagsync_cmdp = NULL;
2284 	}
2285 	tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
2286 	sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
2287 
2288 	/*
2289 	 * build xmit pkt header -
2290 	 * hdr1 has speed, tag, channel number and sync bits
2291 	 * hdr2 has the packet length.
2292 	 */
2293 	wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
2294 	    (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
2295 	    DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
2296 	    DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
2297 
2298 	wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
2299 
2300 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_exit,
2301 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2302 }
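
/*
 * Illustration (not part of the driver): a standalone sketch of the
 * quadlet packing done above.  The shift values mirror the OHCI 1.1
 * isochronous transmit header layout (sy in bits 0-3, tcode 4-7, channel
 * 8-13, tag 14-15, spd 16-18, and dataLength in the upper half of the
 * second quadlet); they are assumptions standing in for the real
 * DESC_PKT_* constants, and the example_ names are hypothetical.
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

static uint32_t
example_it_hdr1(uint32_t spd, uint32_t tag, uint32_t chan, uint32_t sync)
{
	return ((spd << 16) | (tag << 14) | (chan << 8) |
	    (0xA << 4) | sync);		/* 0xA is the isoch block tcode */
}

static uint32_t
example_it_hdr2(uint32_t pktlen)
{
	return (pktlen << 16);		/* dataLength in upper 16 bits */
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */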
2303 
2304 /*
2305  * hci1394_set_xmit_skip_mode()
2306  *    Set current skip mode from default or from currently active command.
2307  *    If non-default skip mode command's skip mode is skip to label, find
2308  *    and set xfer start IXL command which follows skip to label into
2309  *    compiler_privatep of set skipmode IXL command.
2310  */
2311 static void
2312 hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
2313 {
2314 	int err;
2315 
2316 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_enter,
2317 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2318 
2319 	if (wvp->ixl_setskipmode_cmdp == NULL) {
2320 		wvp->skipmode = wvp->default_skipmode;
2321 		wvp->skiplabelp = wvp->default_skiplabelp;
2322 		wvp->skipxferp = wvp->default_skipxferp;
2323 	} else {
2324 		wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
2325 		wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
2326 		wvp->skipxferp = NULL;
2327 		if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
2328 			err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
2329 			    NULL, &wvp->skipxferp);
2330 			if (err == DDI_FAILURE) {
2331 				TNF_PROBE_2(hci1394_set_xmit_skip_mode_error,
2332 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
2333 				    errmsg, "IXL1394_ENO_DATA_PKTS: "
2334 				    "label<->jump loop detected for skiplabel "
2335 				    "w/no xfers", tnf_opaque, setskip_cmdp,
2336 				    wvp->ixl_setskipmode_cmdp);
2337 				wvp->skipxferp = NULL;
2338 				wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
2339 			}
2340 		}
2341 		wvp->ixl_setskipmode_cmdp->compiler_privatep =
2342 		    (void *)wvp->skipxferp;
2343 	}
2344 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_exit,
2345 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2346 }
2347 
2348 /*
2349  * hci1394_set_xmit_storevalue_desc()
2350  *    Set up store_value DMA descriptor.
2351  *    XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
2352  *    descriptor in the descriptor block (to handle skip mode processing)
2353  */
2354 static void
2355 hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
2356 {
2357 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_enter,
2358 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2359 
2360 	wvp->descriptors++;
2361 
2362 	HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
2363 	    wvp->storevalue_data);
2364 	wvp->descriptor_block[wvp->descriptors - 1].data_addr =
2365 	    wvp->storevalue_bufp;
2366 	wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
2367 	wvp->descriptor_block[wvp->descriptors - 1].status = 0;
2368 
2369 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_exit,
2370 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2371 }
2372 
2373 /*
2374  * hci1394_set_next_xfer_buf()
2375  *    This routine adds the data buffer to the current wvp list.
2376  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2377  *    contains the error code.
2378  */
2379 static int
2380 hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
2381     uint16_t size)
2382 {
2383 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_enter,
2384 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2385 
2386 	/* error if buffer pointer is null (size may be 0) */
2387 	if (bufp == 0) {
2388 
2389 		wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
2390 
2391 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2392 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2393 		return (DDI_FAILURE);
2394 	}
2395 
2396 	/* count new xfer buffer */
2397 	wvp->xfer_bufcnt++;
2398 
2399 	/* error if exceeds maximum xfer buffer components allowed */
2400 	if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
2401 
2402 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
2403 
2404 		TNF_PROBE_2(hci1394_set_next_xfer_buf_error,
2405 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2406 		    "IXL1394_EFRAGMENT_OFLO", tnf_int, frag_count,
2407 		    wvp->xfer_bufcnt);
2408 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2409 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2410 		return (DDI_FAILURE);
2411 	}
2412 
2413 	/* save xmit buffer and size */
2414 	wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
2415 	wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
2416 
2417 	/* accumulate total packet length */
2418 	wvp->xfer_pktlen += size;
2419 
2420 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2421 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2422 	return (DDI_SUCCESS);
2423 }
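
/*
 * Illustration (not part of the driver): the routine above accumulates a
 * flat scatter list that later becomes a single descriptor block, so the
 * Z limit caps how many fragments one packet may carry.  Standalone
 * sketch; EXAMPLE_MAXZ stands in for HCI1394_DESC_MAX_Z and the example_
 * names are hypothetical.
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

#define	EXAMPLE_MAXZ	8

typedef struct example_xferlist {
	uint32_t	bufp[EXAMPLE_MAXZ];
	uint16_t	size[EXAMPLE_MAXZ];
	uint32_t	cnt;		/* fragments accumulated so far */
	uint32_t	pktlen;		/* running total packet length */
} example_xferlist_t;

static int
example_add_buf(example_xferlist_t *xl, uint32_t bufp, uint16_t size)
{
	if (bufp == 0 || xl->cnt >= EXAMPLE_MAXZ)
		return (-1);	/* null buffer addr or fragment overflow */
	xl->bufp[xl->cnt] = bufp;
	xl->size[xl->cnt] = size;
	xl->cnt++;
	xl->pktlen += size;
	return (0);
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */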
2424 
2425 /*
2426  * hci1394_flush_end_desc_check()
2427  *    Check if a flush is required before the last descriptor block of a
2428  *    non-unary set generated by an xfer buf or xmit special command, or
2429  *    of a unary set, provided no other flush has already been done.
2430  *
2431  *    An hci flush is required if the xfer is finalized by an updateable
2432  *    jump command.
2433  *
2434  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2435  *    will contain the error code.
2436  */
2437 static int
2438 hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
2439 {
2440 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_enter,
2441 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2442 
2443 	if ((count != 0) ||
2444 	    ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2445 	    INITIATING_LBL)) == 0)) {
2446 
2447 		if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
2448 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
2449 
2450 				TNF_PROBE_0_DEBUG(
2451 				    hci1394_flush_end_desc_check_exit,
2452 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2453 
2454 				/* wvp->dma_bld_error is set by above call */
2455 				return (DDI_FAILURE);
2456 			}
2457 		}
2458 	}
2459 
2460 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_exit,
2461 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2462 	return (DDI_SUCCESS);
2463 }
2464 
2465 /*
2466  * hci1394_flush_hci_cache()
2467  *    Sun hci controller (RIO) implementation specific processing!
2468  *
2469  *    Allocate dma memory for 1 hci descriptor block which will be left unused.
2470  *    During execution this will cause a break in the contiguous address space
2471  *    processing required by Sun's RIO implementation of the ohci controller and
2472  *    will require the controller to refetch the next descriptor block from
2473  *    host memory.
2474  *
2475  *    General rules for cache flush preceding a descriptor block in dma memory:
2476  *    1. Current IXL Xfer Command Updateable Rule:
2477  *	    Cache flush of IXL xfer command is required if it, or any of the
2478  *	    non-start IXL packet xfer commands associated with it, is flagged
2479  *	    updateable.
2480  *    2. Next IXL Xfer Command Indeterminate Rule:
2481  *	    Cache flush of IXL xfer command is required if an IXL jump command
2482  *	    which is flagged updateable has finalized the current IXL xfer
2483  *	    command.
2484  *    3. Updateable IXL Set Command Rule:
2485  *	    Cache flush of an IXL xfer command is required if any of the IXL
2486  *	    "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
2487  *	    command (i.e. immediately preceding it) is flagged updateable.
2488  *    4. Label Initiating Xfer Command Rule:
2489  *	    Cache flush of IXL xfer command is required if it is initiated by a
2490  *	    label IXL command.  (This is to allow both a flush of the cache and
2491  *	    an interrupt to be generated easily and in close proximity to each
2492  *	    other.  This makes a simpler, more reliable reset of
2493  *	    descriptor statuses possible, especially when the cycle of
2494  *	    hci commands is short and/or there are no callbacks
2495  *	    distributed through the span of xfers.  This is especially
2496  *	    important for input, where statuses must be reset before
2497  *	    execution cycles back again.)
2498  *
2499  *    Application of above rules:
2500  *    Packet mode IXL xfer commands:
2501  *	    If any of the above flush rules apply, a cache flush should be done
2502  *	    immediately preceding the generation of the dma descriptor block
2503  *	    for the packet xfer.
2504  *    Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
2505  *    SEND_HDR_ONLY, and SEND_NO_PKT):
2506  *	    If Rule #1, #3 or #4 applies, a cache flush should be done
2507  *	    immediately before the first generated dma descriptor block of the
2508  *	    non-packet xfer.
2509  *	    If Rule #2 applies, a cache flush should be done immediately before
2510  *	    the last generated dma descriptor block of the non-packet xfer.
2511  *
2512  *    Note: The flush cache should be done at most once in each location that is
2513  *    required to be flushed no matter how many rules apply (i.e. only once
2514  *    before the first descriptor block and/or only once before the last
2515  *    descriptor block generated).  If more than one place requires a flush,
2516  *    then both flush operations must be performed.  This is determined by
2517  *    taking all rules that apply into account.
2518  *
2519  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2520  *    will contain the error code.
2521  */
2522 static int
2523 hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
2524 {
2525 	uint32_t	dma_bound;
2526 
2527 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_enter,
2528 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2529 
2530 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
2531 	    NULL) {
2532 
2533 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2534 
2535 		TNF_PROBE_1(hci1394_flush_hci_cache_fail,
2536 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2537 		    "IXL1394_EMEM_ALLOC_FAIL: for flush_hci_cache");
2538 		TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2539 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2540 		return (DDI_FAILURE);
2541 	}
2542 
2543 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2544 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2545 	return (DDI_SUCCESS);
2546 }
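
/*
 * Illustration (not part of the driver): the "flush" above simply burns
 * one descriptor-sized chunk of the current dma block, leaving a hole so
 * the next descriptor block starts at a non-contiguous bus address and
 * the RIO controller must refetch from host memory.  Hypothetical sketch
 * over a used-offset counter:
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

static void
example_burn_one_desc(uint32_t *used, uint32_t descsize)
{
	/* allocate and discard: only the resulting gap matters */
	*used += descsize;
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */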
2547 
2548 /*
2549  * hci1394_alloc_storevalue_dma_mem()
2550  *    Allocate dma memory for a single hci component descriptor block
2551  *    which will be used as the dma memory location that ixl
2552  *    compiler-generated storevalue descriptor commands will
2553  *    specify as the location to store their data value.
2554  *
2555  *    Returns 32-bit bound address of allocated mem, or NULL.
2556  */
2557 static uint32_t
2558 hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
2559 {
2560 	uint32_t	dma_bound;
2561 
2562 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_enter,
2563 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2564 
2565 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
2566 	    &dma_bound) == NULL) {
2567 
2568 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2569 
2570 		TNF_PROBE_2(hci1394_bld_alloc_storevalue_dma_mem_alloc_fail,
2571 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2572 		    "IXL1394_EMEM_ALLOC_FAIL: for storevalue dma",
2573 		    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
2574 		TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2575 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2576 		return (0);
2577 	}
2578 
2579 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2580 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2581 
2582 	/* return bound address of allocated memory */
2583 	return (dma_bound);
2584 }
2585 
2586 
2587 /*
2588  * hci1394_alloc_xfer_ctl()
2589  *    Allocate an xfer_ctl structure.
2590  */
2591 static hci1394_xfer_ctl_t *
2592 hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
2593 {
2594 	hci1394_xfer_ctl_t *xcsp;
2595 
2596 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_enter,
2597 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2598 
2599 	/*
2600 	 * allocate an xfer_ctl struct which includes dmacnt of
2601 	 * xfer_ctl_dma structs
2602 	 */
2603 #ifdef _KERNEL
2604 	if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
2605 	    (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2606 	    sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
2607 
2608 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2609 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2610 		return (NULL);
2611 	}
2612 #else
2613 	/*
2614 	 * This section makes it possible to easily run and test the compiler in
2615 	 * user mode.
2616 	 */
2617 	if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
2618 	    sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2619 	    sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
2620 
2621 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2622 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2623 		return (NULL);
2624 	}
2625 #endif
2626 	/*
2627 	 * set dma structure count into allocated xfer_ctl struct for
2628 	 * later deletion.
2629 	 */
2630 	xcsp->cnt = dmacnt;
2631 
2632 	/* link it to previously allocated xfer_ctl structs or set as first */
2633 	if (wvp->xcs_firstp == NULL) {
2634 		wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
2635 	} else {
2636 		wvp->xcs_currentp->ctl_nextp = xcsp;
2637 		wvp->xcs_currentp = xcsp;
2638 	}
2639 
2640 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2641 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2642 
2643 	/* return allocated xfer_ctl structure */
2644 	return (xcsp);
2645 }
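
/*
 * Illustration (not part of the driver): the allocation above relies on
 * the classic C89 trailing-array idiom, in which the struct declares
 * dma[1] and the allocator widens it to dmacnt entries.  A minimal
 * user-mode sketch with hypothetical example_ types:
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>
#include <stdlib.h>

typedef struct example_dma {
	uint32_t	dma_bound;
} example_dma_t;

typedef struct example_ctl {
	uint32_t	cnt;
	example_dma_t	dma[1];		/* really cnt entries long */
} example_ctl_t;

static example_ctl_t *
example_alloc_ctl(uint32_t dmacnt)
{
	example_ctl_t *xcsp;

	xcsp = calloc(1, sizeof (example_ctl_t) +
	    (dmacnt - 1) * sizeof (example_dma_t));
	if (xcsp != NULL)
		xcsp->cnt = dmacnt;
	return (xcsp);
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */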
2646 
2647 /*
2648  * hci1394_alloc_dma_mem()
2649  *	Allocates and binds memory for openHCI DMA descriptors as needed.
2650  */
2651 static void *
2652 hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
2653     uint32_t *dma_bound)
2654 {
2655 	hci1394_idma_desc_mem_t *dma_new;
2656 	hci1394_buf_parms_t parms;
2657 	hci1394_buf_info_t *memp;
2658 	void	*dma_mem_ret;
2659 	int	ret;
2660 
2661 	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_enter,
2662 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2663 
2664 	/*
2665 	 * if no dma has been allocated or current request exceeds
2666 	 * remaining memory
2667 	 */
2668 	if ((wvp->dma_currentp == NULL) ||
2669 	    (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
2670 	    wvp->dma_currentp->used))) {
2671 #ifdef _KERNEL
2672 		/* kernel-mode memory allocation for driver */
2673 
2674 		/* allocate struct to track more dma descriptor memory */
2675 		if ((dma_new = (hci1394_idma_desc_mem_t *)
2676 		    kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
2677 		    KM_NOSLEEP)) == NULL) {
2678 
2679 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2680 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2681 			return (NULL);
2682 		}
2683 
2684 		/*
2685 		 * if more cookies available from the current mem, try to find
2686 		 * one of suitable size. Cookies that are too small will be
2687 		 * skipped and unused. Given that cookie size is always at least
2688 		 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
2689 		 * it's a small price to pay for code simplicity.
2690 		 */
2691 		if (wvp->dma_currentp != NULL) {
2692 			/* new struct is derived from current */
2693 			memp = &wvp->dma_currentp->mem;
2694 			dma_new->mem = *memp;
2695 			dma_new->offset = wvp->dma_currentp->offset +
2696 			    memp->bi_cookie.dmac_size;
2697 
2698 			for (; memp->bi_cookie_count > 1;
2699 			    memp->bi_cookie_count--) {
2700 				ddi_dma_nextcookie(memp->bi_dma_handle,
2701 				    &dma_new->mem.bi_cookie);
2702 
2703 				if (dma_new->mem.bi_cookie.dmac_size >= size) {
2704 					dma_new->mem_handle =
2705 					    wvp->dma_currentp->mem_handle;
2706 					wvp->dma_currentp->mem_handle = NULL;
2707 					dma_new->mem.bi_cookie_count--;
2708 					break;
2709 				}
2710 				dma_new->offset +=
2711 				    dma_new->mem.bi_cookie.dmac_size;
2712 			}
2713 		}
2714 
2715 		/* if no luck with current buffer, allocate a new one */
2716 		if (dma_new->mem_handle == NULL) {
2717 			parms.bp_length = HCI1394_IXL_PAGESIZE;
2718 			parms.bp_max_cookies = OHCI_MAX_COOKIE;
2719 			parms.bp_alignment = 16;
2720 			ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
2721 			    &parms, &dma_new->mem, &dma_new->mem_handle);
2722 			if (ret != DDI_SUCCESS) {
2723 				kmem_free(dma_new,
2724 				    sizeof (hci1394_idma_desc_mem_t));
2725 
2726 				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2727 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2728 				return (NULL);
2729 			}
2730 
2731 			/* paranoia: this is not supposed to happen */
2732 			if (dma_new->mem.bi_cookie.dmac_size < size) {
2733 				hci1394_buf_free(&dma_new->mem_handle);
2734 				kmem_free(dma_new,
2735 				    sizeof (hci1394_idma_desc_mem_t));
2736 
2737 				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2738 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2739 				return (NULL);
2740 			}
2741 			dma_new->offset = 0;
2742 		}
2743 #else
2744 		/* user-mode memory allocation for user mode compiler tests */
2745 		/* allocate another dma_desc_mem struct */
2746 		if ((dma_new = (hci1394_idma_desc_mem_t *)
2747 		    calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
2748 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2749 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2750 			return (NULL);
2751 		}
2752 		dma_new->mem.bi_dma_handle = NULL;
2753 		dma_new->mem.bi_handle = NULL;
2754 		if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
2755 		    HCI1394_IXL_PAGESIZE)) == NULL) {
2756 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2757 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2758 			return (NULL);
2759 		}
2760 		dma_new->mem.bi_cookie.dmac_address =
2761 		    (unsigned long)dma_new->mem.bi_kaddr;
2762 		dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
2763 		dma_new->mem.bi_cookie_count = 1;
2764 #endif
2765 
2766 		/* if this is not first dma_desc_mem, link last one to it */
2767 		if (wvp->dma_currentp != NULL) {
2768 			wvp->dma_currentp->dma_nextp = dma_new;
2769 			wvp->dma_currentp = dma_new;
2770 		} else {
2771 			/* else set it as first one */
2772 			wvp->dma_currentp = wvp->dma_firstp = dma_new;
2773 		}
2774 	}
2775 
2776 	/* now allocate requested memory from current block */
2777 	dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
2778 	    wvp->dma_currentp->offset + wvp->dma_currentp->used;
2779 	*dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
2780 	    wvp->dma_currentp->used;
2781 	wvp->dma_currentp->used += size;
2782 
2783 	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2784 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2785 	return (dma_mem_ret);
2786 }
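
/*
 * Illustration (not part of the driver): the tail of the routine above is
 * a bump allocator over pre-bound dma blocks; each request returns the
 * kernel virtual address at the current "used" offset plus the matching
 * bus address, then advances "used".  Standalone sketch with hypothetical
 * example_ names:
 */
#ifdef HCI1394_IXL_COMP_EXAMPLES
#include <stdint.h>

typedef struct example_dma_blk {
	char		*kaddr;		/* kernel virtual base */
	uint32_t	busaddr;	/* bound (bus) base address */
	uint32_t	len;		/* usable bytes in this block */
	uint32_t	used;		/* bytes already handed out */
} example_dma_blk_t;

static void *
example_bump_alloc(example_dma_blk_t *blk, uint32_t size, uint32_t *bound)
{
	if (size > blk->len - blk->used)
		return (NULL);	/* caller would chain on a fresh block */
	*bound = blk->busaddr + blk->used;
	blk->used += size;
	return (blk->kaddr + (blk->used - size));
}
#endif	/* HCI1394_IXL_COMP_EXAMPLES */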
2787 
2788 
2789 /*
2790  * hci1394_is_opcode_valid()
2791  *    given an ixl opcode, this routine returns B_TRUE if it is a
2792  *    recognized opcode and B_FALSE if it is not recognized.
2793  *    Note that the FULL 16 bits of the opcode are checked, which includes
2794  *    the various flags and not just the low-order 8 bits of unique code.
2795  */
2796 static boolean_t
2797 hci1394_is_opcode_valid(uint16_t ixlopcode)
2798 {
2799 	TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_enter,
2800 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2801 
2802 	/* if it's not one we know about, then it's bad */
2803 	switch (ixlopcode) {
2804 	case IXL1394_OP_LABEL:
2805 	case IXL1394_OP_JUMP:
2806 	case IXL1394_OP_CALLBACK:
2807 	case IXL1394_OP_RECV_PKT:
2808 	case IXL1394_OP_RECV_PKT_ST:
2809 	case IXL1394_OP_RECV_BUF:
2810 	case IXL1394_OP_SEND_PKT:
2811 	case IXL1394_OP_SEND_PKT_ST:
2812 	case IXL1394_OP_SEND_PKT_WHDR_ST:
2813 	case IXL1394_OP_SEND_BUF:
2814 	case IXL1394_OP_SEND_HDR_ONLY:
2815 	case IXL1394_OP_SEND_NO_PKT:
2816 	case IXL1394_OP_STORE_TIMESTAMP:
2817 	case IXL1394_OP_SET_TAGSYNC:
2818 	case IXL1394_OP_SET_SKIPMODE:
2819 	case IXL1394_OP_SET_SYNCWAIT:
2820 	case IXL1394_OP_JUMP_U:
2821 	case IXL1394_OP_CALLBACK_U:
2822 	case IXL1394_OP_RECV_PKT_U:
2823 	case IXL1394_OP_RECV_PKT_ST_U:
2824 	case IXL1394_OP_RECV_BUF_U:
2825 	case IXL1394_OP_SEND_PKT_U:
2826 	case IXL1394_OP_SEND_PKT_ST_U:
2827 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
2828 	case IXL1394_OP_SEND_BUF_U:
2829 	case IXL1394_OP_SET_TAGSYNC_U:
2830 	case IXL1394_OP_SET_SKIPMODE_U:
2831 		TNF_PROBE_1_DEBUG(hci1394_is_opcode_valid_info,
2832 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2833 		    "ixl opcode is valid");
2834 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
2835 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2836 		return (B_TRUE);
2837 	default:
2838 		TNF_PROBE_2(hci1394_is_opcode_invalid_error,
2839 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2840 		    "ixl opcode is NOT valid", tnf_opaque, ixl_opcode,
2841 		    ixlopcode);
2842 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
2843 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2844 		return (B_FALSE);
2845 	}
2846 }
2847