/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * hci1394_ixl_comp.c
 *    Isochronous IXL Compiler.
 *    The compiler converts the general hardware independent IXL command
 *    blocks into OpenHCI DMA descriptors.
 */

#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>
#include <sys/1394/adapters/hci1394.h>

/* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
#define	HCI1394_IXL_PAGESIZE	8000
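/*
 * For reference: each OpenHCI DMA descriptor is 16 bytes (four 32-bit
 * quadlets), so an 8000-byte page holds the 500 descriptors noted above.
 */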

/* invalid opcode */
#define	IXL1394_OP_INVALID  (0 | IXL1394_OPTY_OTHER)

/*
 * maximum number of interrupts permitted for a single context in which
 * the context does not advance to the next DMA descriptor.  Interrupts are
 * triggered by 1) hardware completing a DMA descriptor block which has the
 * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
 * interrupt.  Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
 * returned.
 */
int hci1394_ixl_max_noadv_intrs = 8;


static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
    hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
    ixl1394_command_t *ixlp);
static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
    ixl1394_command_t *ixlp);
static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
    caddr_t *dma_descpp, uint32_t *dma_desc_bound);
static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
    uint32_t bufp, uint16_t size);
static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
    uint32_t count);
static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
    uint32_t dmacnt);
static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
    uint32_t size, uint32_t *dma_bound);
static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);


/*
 * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
 * Receive Only:			Transmit Only:
 *    IXL1394_OP_RECV_PKT_ST		    IXL1394_OP_SEND_PKT_WHDR_ST
 *    IXL1394_OP_RECV_PKT		    IXL1394_OP_SEND_PKT_ST
 *    IXL1394_OP_RECV_BUF		    IXL1394_OP_SEND_PKT
 *    IXL1394_OP_SET_SYNCWAIT		    IXL1394_OP_SEND_BUF
 *					    IXL1394_OP_SEND_HDR_ONLY
 * Receive or Transmit:			    IXL1394_OP_SEND_NO_PKT
 *    IXL1394_OP_CALLBACK		    IXL1394_OP_SET_TAGSYNC
 *    IXL1394_OP_LABEL			    IXL1394_OP_SET_SKIPMODE
 *    IXL1394_OP_JUMP			    IXL1394_OP_STORE_TIMESTAMP
 */
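
/*
 * As an illustrative sketch (not part of the accepted-opcode list above),
 * a simple packet-per-buffer receive program built from these opcodes
 * might look like:
 *
 *    IXL1394_OP_LABEL		loop entry point
 *    IXL1394_OP_RECV_BUF	buffer carved into size/pkt_size packets
 *    IXL1394_OP_CALLBACK	notify the client; causes interrupt enable
 *    IXL1394_OP_JUMP		back to the label above
 *
 * Pass one below turns each xfer command of such a chain into descriptor
 * blocks; pass two resolves the jump and skip addresses between them.
 */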

/*
 * hci1394_compile_ixl()
 *    Top level ixl compiler entry point.  Scans ixl and builds OpenHCI 1.0
 *    descriptor blocks in dma memory.
 */
int
hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
    ixl1394_command_t *ixlp, int *resultp)
{
	hci1394_comp_ixl_vars_t wv;	/* working variables used throughout */

	ASSERT(soft_statep != NULL);
	ASSERT(ctxtp != NULL);

	/* Initialize compiler working variables */
	hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);

	/*
	 * First pass:
	 *    Parse ixl commands, building desc blocks, until end of IXL
	 *    linked list.
	 */
	hci1394_parse_ixl(&wv, ixlp);

	/*
	 * Second pass:
	 *    Resolve all generated descriptor block jump and skip addresses.
	 *    Set interrupt enable in descriptor blocks which have callback
	 *    operations in their execution scope. (Previously store_timestamp
	 *    operations were counted also.) Set interrupt enable in descriptor
	 *    blocks which were introduced by an ixl label command.
	 */
	if (wv.dma_bld_error == 0) {
		hci1394_finalize_all_xfer_desc(&wv);
	}

	/* Endup: finalize and cleanup ixl compile, return result */
	hci1394_compile_ixl_endup(&wv);

	*resultp = wv.dma_bld_error;
	if (*resultp != 0)
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}
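
/*
 * Usage sketch (illustrative; not lifted from the driver's isoch setup
 * path): the caller passes the soft state, an initialized isoch context
 * and the head of the client's IXL program; on DDI_FAILURE, *resultp
 * holds an IXL1394_E* code such as IXL1394_ENO_DATA_PKTS.
 *
 *	int result;
 *
 *	if (hci1394_compile_ixl(soft_statep, ctxtp, ixl_firstp,
 *	    &result) != DDI_SUCCESS)
 *		return (result);
 */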

/*
 * hci1394_compile_ixl_init()
 *    Initialize the isoch context structure associated with the IXL
 *    program, and initialize the temporary working variables structure.
 */
static void
hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
    hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
    ixl1394_command_t *ixlp)
{
	/* initialize common recv/xmit compile values */
	wvp->soft_statep = soft_statep;
	wvp->ctxtp = ctxtp;

	/* init/clear ctxtp values */
	ctxtp->dma_mem_execp = 0;
	ctxtp->dma_firstp = NULL;
	ctxtp->dma_last_time = 0;
	ctxtp->xcs_firstp = NULL;
	ctxtp->ixl_exec_depth = 0;
	ctxtp->ixl_execp = NULL;
	ctxtp->ixl_firstp = ixlp;
	ctxtp->default_skipxferp = NULL;

	/*
	 * the context's max_noadv_intrs is set here instead of in isoch init
	 * because the default is patchable and would only be picked up this way
	 */
	ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;

	/* init working variables */
	wvp->xcs_firstp = NULL;
	wvp->xcs_currentp = NULL;

	wvp->dma_firstp = NULL;
	wvp->dma_currentp = NULL;
	wvp->dma_bld_error = 0;

	wvp->ixl_io_mode = ctxtp->ctxt_flags;
	wvp->ixl_cur_cmdp = NULL;
	wvp->ixl_cur_xfer_stp = NULL;
	wvp->ixl_cur_labelp = NULL;

	wvp->ixl_xfer_st_cnt = 0;	/* count of xfer start commands found */
	wvp->xfer_state = XFER_NONE;	/* none, pkt, buf, skip, hdronly */
	wvp->xfer_hci_flush = 0;	/* updateable - xfer, jump, set */
	wvp->xfer_pktlen = 0;
	wvp->xfer_bufcnt = 0;
	wvp->descriptors = 0;

	/* START RECV ONLY SECTION */
	wvp->ixl_setsyncwait_cnt = 0;

	/* START XMIT ONLY SECTION */
	wvp->ixl_settagsync_cmdp = NULL;
	wvp->ixl_setskipmode_cmdp = NULL;
	wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
	wvp->default_skiplabelp = ctxtp->default_skiplabelp;
	wvp->default_skipxferp = NULL;
	wvp->skipmode = ctxtp->default_skipmode;
	wvp->skiplabelp = NULL;
	wvp->skipxferp = NULL;
	wvp->default_tag = ctxtp->default_tag;
	wvp->default_sync = ctxtp->default_sync;
	wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
	wvp->storevalue_data = 0;
	wvp->xmit_pkthdr1 = 0;
	wvp->xmit_pkthdr2 = 0;
	/* END XMIT ONLY SECTION */
}

/*
 * hci1394_compile_ixl_endup()
 *    This routine is called just before the main hci1394_compile_ixl() exits.
 *    It checks for errors and performs the appropriate cleanup, or rolls any
 *    relevant info from the working variables struct into the context struct.
 */
static void
hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
{
	ixl1394_command_t *ixl_exec_stp;
	hci1394_idma_desc_mem_t *dma_nextp;
	int err;

	/* error if no descriptor blocks found in ixl & created in dma memory */
	if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
		wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
	}

	/* if no errors yet, find the first IXL command that's a transfer cmd */
	if (wvp->dma_bld_error == 0) {
		err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
		    NULL, &ixl_exec_stp);

		/* error if a label<->jump loop, or no xfer */
		if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
		}
	}

	/* Sync all the DMA descriptor buffers */
	dma_nextp = wvp->ctxtp->dma_firstp;
	while (dma_nextp != NULL) {
		err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
		    (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
		    DDI_DMA_SYNC_FORDEV);
		if (err != DDI_SUCCESS) {
			wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;

			break;
		}

		/* advance to next dma memory descriptor */
		dma_nextp = dma_nextp->dma_nextp;
	}

	/*
	 * If error, cleanup and return. Delete all allocated xfer_ctl structs
	 * and all dma descriptor page memory and its dma memory blocks too.
	 */
	if (wvp->dma_bld_error != 0) {
		wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
		wvp->ctxtp->dma_firstp = wvp->dma_firstp;
		hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);

		return;
	}

	/* can only get to here if the first ixl transfer command is found */

	/* set required processing vars into ctxtp struct */
	wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
	wvp->ctxtp->dma_mem_execp = 0;

	/*
	 * the transfer command's compiler private xfer_ctl structure has the
	 * appropriate bound address
	 */
	wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
	    ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
	wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
	wvp->ctxtp->dma_firstp = wvp->dma_firstp;
	wvp->ctxtp->dma_last_time = 0;
	wvp->ctxtp->ixl_exec_depth = 0;
	wvp->ctxtp->ixl_execp = NULL;

	/* compile done */
}

/*
 * hci1394_parse_ixl()
 *    Scan IXL program and build ohci DMA descriptor blocks in dma memory.
 *
 *    Parse/process succeeding ixl commands until end of IXL linked list is
 *    reached. Evaluate ixl syntax and build (xmit or recv) descriptor
 *    blocks.  To aid execution time evaluation of current location, enable
 *    status recording on each descriptor block built.
 *    On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
 */
static void
hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
{
	ixl1394_command_t *ixlnextp = ixlp;	/* addr of next ixl cmd */
	ixl1394_command_t *ixlcurp = NULL;	/* addr of current ixl cmd */
	uint16_t ixlopcode = 0;			/* opcode of current ixl cmd */

	uint32_t pktsize;
	uint32_t pktcnt;

	/* follow ixl links until reach end or find error */
	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {

		/* set this command as the current ixl command */
		wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
		ixlnextp = ixlcurp->next_ixlp;

		ixlopcode = ixlcurp->ixl_opcode;

		/* init compiler controlled values in current ixl command */
		ixlcurp->compiler_privatep = NULL;
		ixlcurp->compiler_resv = 0;

		/* error if xmit/recv mode not appropriate for current cmd */
		if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
		    ((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
		    (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
		    ((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {

			/* check if command op failed because it was invalid */
			if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
				wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
			} else {
				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
			}
			continue;
		}

		/*
		 * if ends xfer flag set, finalize current xfer descriptor
		 * block build
		 */
		if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
			/* finalize any descriptor block build in progress */
			hci1394_finalize_cur_xfer_desc(wvp);

			if (wvp->dma_bld_error != 0) {
				continue;
			}
		}

		/*
		 * now process based on specific opcode value
		 */
		switch (ixlopcode) {

		case IXL1394_OP_RECV_BUF:
		case IXL1394_OP_RECV_BUF_U: {
			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;

			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;

			/*
			 * In packet-per-buffer mode:
			 *    This ixl command builds a collection of xfer
			 *    descriptor blocks (size/pkt_size of them) each to
			 *    recv a packet whose buffer size is pkt_size and
			 *    whose buffer ptr is (pktcur*pkt_size + bufp)
			 *
			 * In buffer fill mode:
			 *    This ixl command builds a single xfer descriptor
			 *    block to recv as many packets or parts of packets
			 *    as can fit into the buffer size specified
			 *    (pkt_size is not used).
			 */

			/* set xfer_state for new descriptor block build */
			wvp->xfer_state = XFER_BUF;

			/* set this ixl command as current xferstart command */
			wvp->ixl_cur_xfer_stp = ixlcurp;

			/*
			 * perform packet-per-buffer checks
			 * (no checks needed when in buffer fill mode)
			 */
			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {

				/* the packets must use the buffer exactly */
				pktsize = cur_xfer_buf_ixlp->pkt_size;
				pktcnt = 0;
				if (pktsize != 0) {
					pktcnt = cur_xfer_buf_ixlp->size /
					    pktsize;
				}
				if ((pktcnt == 0) || ((pktsize * pktcnt) !=
				    cur_xfer_buf_ixlp->size)) {
					wvp->dma_bld_error =
					    IXL1394_EPKTSIZE_RATIO;
					continue;
				}
			}

			/*
			 * set buffer pointer & size into first xfer_bufp
			 * and xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}
			break;
		}

		case IXL1394_OP_RECV_PKT_ST:
		case IXL1394_OP_RECV_PKT_ST_U: {
			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;

			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;

			/* error if in buffer fill mode */
			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
				continue;
			}

			/* set xfer_state for new descriptor block build */
			/* set this ixl command as current xferstart command */
			wvp->xfer_state = XFER_PKT;
			wvp->ixl_cur_xfer_stp = ixlcurp;

			/*
			 * set buffer pointer & size into first xfer_bufp
			 * and xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}
			break;
		}

		case IXL1394_OP_RECV_PKT:
		case IXL1394_OP_RECV_PKT_U: {
			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;

			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;

			/* error if in buffer fill mode */
			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
				continue;
			}

			/* error if xfer_state not xfer pkt */
			if (wvp->xfer_state != XFER_PKT) {
				wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
				continue;
			}

			/*
			 * save xfer start cmd ixl ptr in compiler_privatep
			 * field of this cmd
			 */
			ixlcurp->compiler_privatep = (void *)
			    wvp->ixl_cur_xfer_stp;

			/*
			 * save pkt index [1-n] in compiler_resv field of
			 * this cmd
			 */
			ixlcurp->compiler_resv = wvp->xfer_bufcnt;

			/*
			 * set buffer pointer & size into next xfer_bufp
			 * and xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}

			/*
			 * set updateable xfer cache flush eval flag if
			 * updateable opcode
			 */
			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
			}
			break;
		}

		case IXL1394_OP_SEND_BUF:
		case IXL1394_OP_SEND_BUF_U: {
			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;

			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;

			/*
			 * These send_buf commands build a collection of xmit
			 * descriptor blocks (size/pkt_size of them) each to
			 * xfer a packet whose buffer size is pkt_size and whose
			 * buffer ptr is (pktcur*pkt_size + bufp). (ptr and size
			 * are adjusted if they have header form of ixl cmd)
			 */

			/* set xfer_state for new descriptor block build */
			wvp->xfer_state = XFER_BUF;

			/* set this ixl command as current xferstart command */
			wvp->ixl_cur_xfer_stp = ixlcurp;

			/* the packets must use the buffer exactly, else error */
			pktsize = cur_xfer_buf_ixlp->pkt_size;
			pktcnt = 0;
			if (pktsize != 0) {
				pktcnt = cur_xfer_buf_ixlp->size / pktsize;
			}
			if ((pktcnt == 0) || ((pktsize * pktcnt) !=
			    cur_xfer_buf_ixlp->size)) {
				wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
				continue;
			}

			/* set buf ptr & size into 1st xfer_bufp & xfer_size */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}
			break;
		}

		case IXL1394_OP_SEND_PKT_ST:
		case IXL1394_OP_SEND_PKT_ST_U: {
			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;

			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;

			/* set xfer_state for new descriptor block build */
			/* set this ixl command as current xferstart command */
			wvp->xfer_state = XFER_PKT;
			wvp->ixl_cur_xfer_stp = ixlcurp;

			/*
			 * set buffer pointer & size into first xfer_bufp and
			 * xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}
			break;
		}

		case IXL1394_OP_SEND_PKT_WHDR_ST:
		case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;

			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;

			/* set xfer_state for new descriptor block build */
			/* set this ixl command as current xferstart command */
			wvp->xfer_state = XFER_PKT;
			wvp->ixl_cur_xfer_stp = ixlcurp;

			/*
			 * buffer size must be at least 4 (must include header),
			 * else error
			 */
			if (cur_xfer_pkt_ixlp->size < 4) {
				wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
				continue;
			}

			/*
			 * set buffer and size (excluding header) into first
			 * xfer_bufp and xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
			    cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}
			break;
		}

		case IXL1394_OP_SEND_PKT:
		case IXL1394_OP_SEND_PKT_U: {
			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;

			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;

			/* error if xfer_state not xfer pkt */
			if (wvp->xfer_state != XFER_PKT) {
				wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
				continue;
			}

			/*
			 * save xfer start cmd ixl ptr in compiler_privatep
			 * field of this cmd
			 */
			ixlcurp->compiler_privatep = (void *)
			    wvp->ixl_cur_xfer_stp;

			/*
			 * save pkt index [1-n] in compiler_resv field of this
			 * cmd
			 */
			ixlcurp->compiler_resv = wvp->xfer_bufcnt;

			/*
			 * set buffer pointer & size into next xfer_bufp
			 * and xfer_size
			 */
			if (hci1394_set_next_xfer_buf(wvp,
			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {

				/* wvp->dma_bld_error is set by above call */
				continue;
			}

			/*
			 * set updateable xfer cache flush eval flag if
			 * updateable opcode
			 */
			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
			}
			break;
		}

		case IXL1394_OP_SEND_HDR_ONLY:
			/* set xfer_state for new descriptor block build */
			wvp->xfer_state = XMIT_HDRONLY;

			/* set this ixl command as current xferstart command */
			wvp->ixl_cur_xfer_stp = ixlcurp;
			break;

		case IXL1394_OP_SEND_NO_PKT:
			/* set xfer_state for new descriptor block build */
			wvp->xfer_state = XMIT_NOPKT;

			/* set this ixl command as current xferstart command */
			wvp->ixl_cur_xfer_stp = ixlcurp;
			break;

		case IXL1394_OP_JUMP:
		case IXL1394_OP_JUMP_U: {
			ixl1394_jump_t *cur_jump_ixlp;

			cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;

			/*
			 * verify label indicated by IXL1394_OP_JUMP is
			 * actually an IXL1394_OP_LABEL or NULL
			 */
			if ((cur_jump_ixlp->label != NULL) &&
			    (cur_jump_ixlp->label->ixl_opcode !=
			    IXL1394_OP_LABEL)) {
				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
				continue;
			}
			break;
		}

		case IXL1394_OP_LABEL:
			/*
			 * save current ixl label command for xfer cmd
			 * finalize processing
			 */
			wvp->ixl_cur_labelp = ixlcurp;

			/* set initiating label flag to cause cache flush */
			wvp->xfer_hci_flush |= INITIATING_LBL;
			break;

		case IXL1394_OP_CALLBACK:
		case IXL1394_OP_CALLBACK_U:
		case IXL1394_OP_STORE_TIMESTAMP:
			/*
			 * these commands are accepted during compile,
			 * processed during execution (interrupt handling)
			 * No further processing is needed here.
			 */
			break;

		case IXL1394_OP_SET_SKIPMODE:
		case IXL1394_OP_SET_SKIPMODE_U:
			/*
			 * Error if already have a set skipmode cmd for
			 * this xfer
			 */
			if (wvp->ixl_setskipmode_cmdp != NULL) {
				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
				continue;
			}

			/* save skip mode ixl command and verify skipmode */
			wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
			    ixlcurp;

			if ((wvp->ixl_setskipmode_cmdp->skipmode !=
			    IXL1394_SKIP_TO_NEXT) &&
			    (wvp->ixl_setskipmode_cmdp->skipmode !=
			    IXL1394_SKIP_TO_SELF) &&
			    (wvp->ixl_setskipmode_cmdp->skipmode !=
			    IXL1394_SKIP_TO_STOP) &&
			    (wvp->ixl_setskipmode_cmdp->skipmode !=
			    IXL1394_SKIP_TO_LABEL)) {
				wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
				continue;
			}

			/*
			 * if mode is IXL1394_SKIP_TO_LABEL, verify label
			 * references an IXL1394_OP_LABEL
			 */
			if ((wvp->ixl_setskipmode_cmdp->skipmode ==
			    IXL1394_SKIP_TO_LABEL) &&
			    ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
			    (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
			    IXL1394_OP_LABEL))) {
				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
				continue;
			}
			/*
			 * set updateable set cmd cache flush eval flag if
			 * updateable opcode
			 */
			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
				wvp->xfer_hci_flush |= UPDATEABLE_SET;
			}
			break;

		case IXL1394_OP_SET_TAGSYNC:
		case IXL1394_OP_SET_TAGSYNC_U:
			/*
			 * is an error if already have a set tag and sync cmd
			 * for this xfer
			 */
			if (wvp->ixl_settagsync_cmdp != NULL) {
				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
				continue;
			}

			/* save ixl command containing tag and sync values */
			wvp->ixl_settagsync_cmdp =
			    (ixl1394_set_tagsync_t *)ixlcurp;

			/*
			 * set updateable set cmd cache flush eval flag if
			 * updateable opcode
			 */
			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
				wvp->xfer_hci_flush |= UPDATEABLE_SET;
			}
			break;

		case IXL1394_OP_SET_SYNCWAIT:
			/*
			 * count ixl wait-for-sync commands since last
			 * finalize; ignore multiple occurrences for the
			 * same xfer command
			 */
7947c478bd9Sstevel@tonic-gate 			wvp->ixl_setsyncwait_cnt++;
7957c478bd9Sstevel@tonic-gate 			break;
7967c478bd9Sstevel@tonic-gate 
7977c478bd9Sstevel@tonic-gate 		default:
7987c478bd9Sstevel@tonic-gate 			/* error - unknown/unimplemented ixl command */
7997c478bd9Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
8007c478bd9Sstevel@tonic-gate 			continue;
8017c478bd9Sstevel@tonic-gate 		}
8027c478bd9Sstevel@tonic-gate 	} /* while */
8037c478bd9Sstevel@tonic-gate 
8047c478bd9Sstevel@tonic-gate 	/* finalize any last descriptor block build */
8057c478bd9Sstevel@tonic-gate 	wvp->ixl_cur_cmdp = NULL;
8067c478bd9Sstevel@tonic-gate 	if (wvp->dma_bld_error == 0) {
8077c478bd9Sstevel@tonic-gate 		hci1394_finalize_cur_xfer_desc(wvp);
8087c478bd9Sstevel@tonic-gate 	}
8097c478bd9Sstevel@tonic-gate }
8107c478bd9Sstevel@tonic-gate 
8117c478bd9Sstevel@tonic-gate /*
8127c478bd9Sstevel@tonic-gate  * hci1394_finalize_all_xfer_desc()
8137c478bd9Sstevel@tonic-gate  *    Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
8147c478bd9Sstevel@tonic-gate  *
8157c478bd9Sstevel@tonic-gate  *    Set interrupt enable on first descriptor block associated with current
8167c478bd9Sstevel@tonic-gate  *    xfer IXL command if current IXL xfer was introduced by an IXL label cmnd.
8177c478bd9Sstevel@tonic-gate  *
8187c478bd9Sstevel@tonic-gate  *    Set interrupt enable on last descriptor block associated with current xfer
8197c478bd9Sstevel@tonic-gate  *    IXL command if any callback ixl commands are found on the execution path
8207c478bd9Sstevel@tonic-gate  *    between the current and the next xfer ixl command.  (Previously, this
8217c478bd9Sstevel@tonic-gate  *    applied to store timestamp ixl commands, as well.)
8227c478bd9Sstevel@tonic-gate  */
8237c478bd9Sstevel@tonic-gate static void
hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t * wvp)8247c478bd9Sstevel@tonic-gate hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
8257c478bd9Sstevel@tonic-gate {
8267c478bd9Sstevel@tonic-gate 	ixl1394_command_t *ixlcurp;		/* current ixl command */
8277c478bd9Sstevel@tonic-gate 	ixl1394_command_t *ixlnextp;		/* next ixl command */
8287c478bd9Sstevel@tonic-gate 	ixl1394_command_t *ixlexecnext;
8297c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xferctl_curp;
8307c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xferctl_nxtp;
8317c478bd9Sstevel@tonic-gate 	hci1394_desc_t		*hcidescp;
8327c478bd9Sstevel@tonic-gate 	ddi_acc_handle_t	acc_hdl;
8337c478bd9Sstevel@tonic-gate 	uint32_t	temp;
8347c478bd9Sstevel@tonic-gate 	uint32_t	dma_execnext_addr;
8357c478bd9Sstevel@tonic-gate 	uint32_t	dma_skiplabel_addr;
8367c478bd9Sstevel@tonic-gate 	uint32_t	dma_skip_addr;
8377c478bd9Sstevel@tonic-gate 	uint32_t	callback_cnt;
8387c478bd9Sstevel@tonic-gate 	uint16_t	repcnt;
8397c478bd9Sstevel@tonic-gate 	uint16_t	ixlopcode;
8407c478bd9Sstevel@tonic-gate 	int		ii;
8417c478bd9Sstevel@tonic-gate 	int		err;
8427c478bd9Sstevel@tonic-gate 
8437c478bd9Sstevel@tonic-gate 	/*
8447c478bd9Sstevel@tonic-gate 	 * If xmit mode and if default skipmode is skip to label -
8457c478bd9Sstevel@tonic-gate 	 * follow exec path starting at default skipmode label until
8467c478bd9Sstevel@tonic-gate 	 * find the first ixl xfer command which is to be executed.
8477c478bd9Sstevel@tonic-gate 	 * Set its address into default_skipxferp.
8487c478bd9Sstevel@tonic-gate 	 */
8497c478bd9Sstevel@tonic-gate 	if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
8507c478bd9Sstevel@tonic-gate 	    (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
8517c478bd9Sstevel@tonic-gate 
8527c478bd9Sstevel@tonic-gate 		err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
8537c478bd9Sstevel@tonic-gate 		    NULL, &wvp->default_skipxferp);
8547c478bd9Sstevel@tonic-gate 		if (err == DDI_FAILURE) {
8557c478bd9Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
8567c478bd9Sstevel@tonic-gate 			return;
8577c478bd9Sstevel@tonic-gate 		}
8587c478bd9Sstevel@tonic-gate 	}
8597c478bd9Sstevel@tonic-gate 
8607c478bd9Sstevel@tonic-gate 	/* set first ixl cmd */
8617c478bd9Sstevel@tonic-gate 	ixlnextp = wvp->ctxtp->ixl_firstp;
8627c478bd9Sstevel@tonic-gate 
8637c478bd9Sstevel@tonic-gate 	/* follow ixl links until reach end or find error */
8647c478bd9Sstevel@tonic-gate 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
8657c478bd9Sstevel@tonic-gate 
8667c478bd9Sstevel@tonic-gate 		/* set this command as the current ixl command */
8677c478bd9Sstevel@tonic-gate 		ixlcurp = ixlnextp;
8687c478bd9Sstevel@tonic-gate 		ixlnextp = ixlcurp->next_ixlp;
8697c478bd9Sstevel@tonic-gate 
8707c478bd9Sstevel@tonic-gate 		/* get command opcode removing unneeded update flag */
8717c478bd9Sstevel@tonic-gate 		ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
8727c478bd9Sstevel@tonic-gate 
8737c478bd9Sstevel@tonic-gate 		/*
8747c478bd9Sstevel@tonic-gate 		 * Scan for next ixl xfer start command (including this one),
8757c478bd9Sstevel@tonic-gate 		 * along ixl link path. Once xfer command found, find next IXL
8767c478bd9Sstevel@tonic-gate 		 * xfer cmd along execution path and fill in branch address of
8777c478bd9Sstevel@tonic-gate 		 * current xfer command. If is composite ixl xfer command, first
8787c478bd9Sstevel@tonic-gate 		 * link forward branch dma addresses of each descriptor block in
8797c478bd9Sstevel@tonic-gate 		 * composite, until reach final one then set its branch address
8807c478bd9Sstevel@tonic-gate 		 * to next execution path xfer found.  Next determine skip mode
8817c478bd9Sstevel@tonic-gate 		 * and fill in skip address(es) appropriately.
8827c478bd9Sstevel@tonic-gate 		 */
8837c478bd9Sstevel@tonic-gate 		/* skip to next if not xfer start ixl command */
8847c478bd9Sstevel@tonic-gate 		if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
8857c478bd9Sstevel@tonic-gate 		    ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
8867c478bd9Sstevel@tonic-gate 			continue;
8877c478bd9Sstevel@tonic-gate 		}
8887c478bd9Sstevel@tonic-gate 
8897c478bd9Sstevel@tonic-gate 		/*
8907c478bd9Sstevel@tonic-gate 		 * get xfer_ctl structure and composite repeat count for current
8917c478bd9Sstevel@tonic-gate 		 * IXL xfer cmd
8927c478bd9Sstevel@tonic-gate 		 */
8937c478bd9Sstevel@tonic-gate 		xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
8947c478bd9Sstevel@tonic-gate 		repcnt = xferctl_curp->cnt;
8957c478bd9Sstevel@tonic-gate 
8967c478bd9Sstevel@tonic-gate 		/*
8977c478bd9Sstevel@tonic-gate 		 * if initiated by an IXL label command, set interrupt enable
8987c478bd9Sstevel@tonic-gate 		 * flag into last component of first descriptor block of
8997c478bd9Sstevel@tonic-gate 		 * current IXL xfer cmd
9007c478bd9Sstevel@tonic-gate 		 */
9017c478bd9Sstevel@tonic-gate 		if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
9027c478bd9Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
9037c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[0].dma_descp;
9047c478bd9Sstevel@tonic-gate 			acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
9057c478bd9Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
9067c478bd9Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
9077c478bd9Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
9087c478bd9Sstevel@tonic-gate 		}
9097c478bd9Sstevel@tonic-gate 
9107c478bd9Sstevel@tonic-gate 		/* find next xfer IXL cmd by following execution path */
9117c478bd9Sstevel@tonic-gate 		err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
9127c478bd9Sstevel@tonic-gate 		    &callback_cnt, &ixlexecnext);
9137c478bd9Sstevel@tonic-gate 
9147c478bd9Sstevel@tonic-gate 		/* if label<->jump loop detected, return error */
9157c478bd9Sstevel@tonic-gate 		if (err == DDI_FAILURE) {
9167c478bd9Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
9177c478bd9Sstevel@tonic-gate 			continue;
9187c478bd9Sstevel@tonic-gate 		}
9197c478bd9Sstevel@tonic-gate 
9207c478bd9Sstevel@tonic-gate 		/* link current IXL's xfer_ctl to next xfer IXL on exec path */
9217c478bd9Sstevel@tonic-gate 		xferctl_curp->execp = ixlexecnext;
9227c478bd9Sstevel@tonic-gate 
9237c478bd9Sstevel@tonic-gate 		/*
9247c478bd9Sstevel@tonic-gate 		 * if callbacks have been seen during execution path scan,
9257c478bd9Sstevel@tonic-gate 		 * set interrupt enable flag into last descriptor of last
9267c478bd9Sstevel@tonic-gate 		 * descriptor block of current IXL xfer cmd
9277c478bd9Sstevel@tonic-gate 		 */
9287c478bd9Sstevel@tonic-gate 		if (callback_cnt != 0) {
9297c478bd9Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
9307c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_descp;
9317c478bd9Sstevel@tonic-gate 			acc_hdl =
9327c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
9337c478bd9Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
9347c478bd9Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
9357c478bd9Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
9367c478bd9Sstevel@tonic-gate 		}
9377c478bd9Sstevel@tonic-gate 
9387c478bd9Sstevel@tonic-gate 		/*
9397c478bd9Sstevel@tonic-gate 		 * obtain dma bound addr of next exec path IXL xfer command,
9407c478bd9Sstevel@tonic-gate 		 * if any
9417c478bd9Sstevel@tonic-gate 		 */
9427c478bd9Sstevel@tonic-gate 		dma_execnext_addr = 0;
9437c478bd9Sstevel@tonic-gate 
9447c478bd9Sstevel@tonic-gate 		if (ixlexecnext != NULL) {
9457c478bd9Sstevel@tonic-gate 			xferctl_nxtp = (hci1394_xfer_ctl_t *)
9467c478bd9Sstevel@tonic-gate 			    ixlexecnext->compiler_privatep;
9477c478bd9Sstevel@tonic-gate 			dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
9487c478bd9Sstevel@tonic-gate 		} else {
9497c478bd9Sstevel@tonic-gate 			/*
9507c478bd9Sstevel@tonic-gate 			 * If this is last descriptor (next == NULL), then
9517c478bd9Sstevel@tonic-gate 			 * make sure the interrupt bit is enabled.  This
9527c478bd9Sstevel@tonic-gate 			 * way we can ensure that we are notified when the
9537c478bd9Sstevel@tonic-gate 			 * descriptor chain processing has come to an end.
9547c478bd9Sstevel@tonic-gate 			 */
9557c478bd9Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
9567c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_descp;
9577c478bd9Sstevel@tonic-gate 			acc_hdl =
9587c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
9597c478bd9Sstevel@tonic-gate 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
9607c478bd9Sstevel@tonic-gate 			temp |= DESC_INTR_ENBL;
9617c478bd9Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
9627c478bd9Sstevel@tonic-gate 		}
9637c478bd9Sstevel@tonic-gate 
9647c478bd9Sstevel@tonic-gate 		/*
9657c478bd9Sstevel@tonic-gate 		 * set jump address of final descriptor block of current IXL
9667c478bd9Sstevel@tonic-gate 		 * xfer cmd to the addr of the next exec path IXL xfer cmd
9677c478bd9Sstevel@tonic-gate 		 */
9687c478bd9Sstevel@tonic-gate 		hcidescp = (hci1394_desc_t *)
9697c478bd9Sstevel@tonic-gate 		    xferctl_curp->dma[repcnt - 1].dma_descp;
9707c478bd9Sstevel@tonic-gate 		acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
9717c478bd9Sstevel@tonic-gate 		ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
9727c478bd9Sstevel@tonic-gate 
9737c478bd9Sstevel@tonic-gate 		/*
9747c478bd9Sstevel@tonic-gate 		 * if a composite object, forward link initial jump
9757c478bd9Sstevel@tonic-gate 		 * dma addresses
9767c478bd9Sstevel@tonic-gate 		 */
9777c478bd9Sstevel@tonic-gate 		for (ii = 0; ii < repcnt - 1; ii++) {
9787c478bd9Sstevel@tonic-gate 			hcidescp = (hci1394_desc_t *)
9797c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[ii].dma_descp;
9807c478bd9Sstevel@tonic-gate 			acc_hdl	 = xferctl_curp->dma[ii].dma_buf->bi_handle;
9817c478bd9Sstevel@tonic-gate 			ddi_put32(acc_hdl, &hcidescp->branch,
9827c478bd9Sstevel@tonic-gate 			    xferctl_curp->dma[ii + 1].dma_bound);
9837c478bd9Sstevel@tonic-gate 		}
9847c478bd9Sstevel@tonic-gate 
9857c478bd9Sstevel@tonic-gate 		/*
9867c478bd9Sstevel@tonic-gate 		 * fill in skip address(es) for all descriptor blocks belonging
9877c478bd9Sstevel@tonic-gate 		 * to current IXL xfer command; note: skip addresses apply only
9887c478bd9Sstevel@tonic-gate 		 * to xmit mode commands
9897c478bd9Sstevel@tonic-gate 		 */
9907c478bd9Sstevel@tonic-gate 		if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
9917c478bd9Sstevel@tonic-gate 
9927c478bd9Sstevel@tonic-gate 			/* first obtain and set skip mode information */
9937c478bd9Sstevel@tonic-gate 			wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
9947c478bd9Sstevel@tonic-gate 			hci1394_set_xmit_skip_mode(wvp);
9957c478bd9Sstevel@tonic-gate 
9967c478bd9Sstevel@tonic-gate 			/*
9977c478bd9Sstevel@tonic-gate 			 * if skip to label, init dma bound addr to be
9987c478bd9Sstevel@tonic-gate 			 * 1st xfer cmd after label
9997c478bd9Sstevel@tonic-gate 			 */
10007c478bd9Sstevel@tonic-gate 			dma_skiplabel_addr = 0;
10017c478bd9Sstevel@tonic-gate 			if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
10027c478bd9Sstevel@tonic-gate 			    (wvp->skipxferp != NULL)) {
10037c478bd9Sstevel@tonic-gate 				xferctl_nxtp = (hci1394_xfer_ctl_t *)
10047c478bd9Sstevel@tonic-gate 				    wvp->skipxferp->compiler_privatep;
10057c478bd9Sstevel@tonic-gate 				dma_skiplabel_addr =
10067c478bd9Sstevel@tonic-gate 				    xferctl_nxtp->dma[0].dma_bound;
10077c478bd9Sstevel@tonic-gate 			}
10087c478bd9Sstevel@tonic-gate 
10097c478bd9Sstevel@tonic-gate 			/*
10107c478bd9Sstevel@tonic-gate 			 * set skip addrs for each descriptor blk at this
10117c478bd9Sstevel@tonic-gate 			 * xfer start IXL cmd
10127c478bd9Sstevel@tonic-gate 			 */
10137c478bd9Sstevel@tonic-gate 			for (ii = 0; ii < repcnt; ii++) {
10147c478bd9Sstevel@tonic-gate 				switch (wvp->skipmode) {
10157c478bd9Sstevel@tonic-gate 
10167c478bd9Sstevel@tonic-gate 				case IXL1394_SKIP_TO_LABEL:
10177c478bd9Sstevel@tonic-gate 					/* set dma bound address - label */
10187c478bd9Sstevel@tonic-gate 					dma_skip_addr = dma_skiplabel_addr;
10197c478bd9Sstevel@tonic-gate 					break;
10207c478bd9Sstevel@tonic-gate 
10217c478bd9Sstevel@tonic-gate 				case IXL1394_SKIP_TO_NEXT:
10227c478bd9Sstevel@tonic-gate 					/* set dma bound address - next */
10237c478bd9Sstevel@tonic-gate 					if (ii < repcnt - 1) {
10247c478bd9Sstevel@tonic-gate 						dma_skip_addr = xferctl_curp->
10257c478bd9Sstevel@tonic-gate 						    dma[ii + 1].dma_bound;
10267c478bd9Sstevel@tonic-gate 					} else {
10277c478bd9Sstevel@tonic-gate 						dma_skip_addr =
10287c478bd9Sstevel@tonic-gate 						    dma_execnext_addr;
10297c478bd9Sstevel@tonic-gate 					}
10307c478bd9Sstevel@tonic-gate 					break;
10317c478bd9Sstevel@tonic-gate 
10327c478bd9Sstevel@tonic-gate 				case IXL1394_SKIP_TO_SELF:
10337c478bd9Sstevel@tonic-gate 					/* set dma bound address - self */
10347c478bd9Sstevel@tonic-gate 					dma_skip_addr =
10357c478bd9Sstevel@tonic-gate 					    xferctl_curp->dma[ii].dma_bound;
10367c478bd9Sstevel@tonic-gate 					break;
10377c478bd9Sstevel@tonic-gate 
10387c478bd9Sstevel@tonic-gate 				case IXL1394_SKIP_TO_STOP:
10397c478bd9Sstevel@tonic-gate 				default:
10407c478bd9Sstevel@tonic-gate 					/* set dma bound address - stop */
10417c478bd9Sstevel@tonic-gate 					dma_skip_addr = 0;
10427c478bd9Sstevel@tonic-gate 					break;
10437c478bd9Sstevel@tonic-gate 				}
10447c478bd9Sstevel@tonic-gate 
10457c478bd9Sstevel@tonic-gate 				/*
10467c478bd9Sstevel@tonic-gate 				 * determine address of first descriptor of
10477c478bd9Sstevel@tonic-gate 				 * current descriptor block by adjusting addr of
10487c478bd9Sstevel@tonic-gate 				 * last descriptor of current descriptor block
10497c478bd9Sstevel@tonic-gate 				 */
10507c478bd9Sstevel@tonic-gate 				hcidescp = ((hci1394_desc_t *)
10517c478bd9Sstevel@tonic-gate 				    xferctl_curp->dma[ii].dma_descp);
10527c478bd9Sstevel@tonic-gate 				acc_hdl =
10537c478bd9Sstevel@tonic-gate 				    xferctl_curp->dma[ii].dma_buf->bi_handle;
10547c478bd9Sstevel@tonic-gate 
10557c478bd9Sstevel@tonic-gate 				/*
10567c478bd9Sstevel@tonic-gate 				 * back up by the count of descriptors in this
10577c478bd9Sstevel@tonic-gate 				 * desc block, not including the last one
10587c478bd9Sstevel@tonic-gate 				 * (pointer arithmetic is in descriptor units)
10597c478bd9Sstevel@tonic-gate 				 */
10607c478bd9Sstevel@tonic-gate 				hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
10617c478bd9Sstevel@tonic-gate 				    DESC_Z_MASK) - 1);
10627c478bd9Sstevel@tonic-gate 
10637c478bd9Sstevel@tonic-gate 				/*
10647c478bd9Sstevel@tonic-gate 				 * adjust further if the last descriptor is
10657c478bd9Sstevel@tonic-gate 				 * double sized (Z - 1 backup overshoots by 1)
10667c478bd9Sstevel@tonic-gate 				 */
10677c478bd9Sstevel@tonic-gate 				if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
10687c478bd9Sstevel@tonic-gate 					hcidescp++;
10697c478bd9Sstevel@tonic-gate 				}
10707c478bd9Sstevel@tonic-gate 				/*
10717c478bd9Sstevel@tonic-gate 				 * now set skip address into first descriptor
10727c478bd9Sstevel@tonic-gate 				 * of descriptor block
10737c478bd9Sstevel@tonic-gate 				 */
10747c478bd9Sstevel@tonic-gate 				ddi_put32(acc_hdl, &hcidescp->branch,
10757c478bd9Sstevel@tonic-gate 				    dma_skip_addr);
10767c478bd9Sstevel@tonic-gate 			} /* for */
10777c478bd9Sstevel@tonic-gate 		} /* if */
10787c478bd9Sstevel@tonic-gate 	} /* while */
10797c478bd9Sstevel@tonic-gate }
10807c478bd9Sstevel@tonic-gate 
10817c478bd9Sstevel@tonic-gate /*
10827c478bd9Sstevel@tonic-gate  * hci1394_finalize_cur_xfer_desc()
10837c478bd9Sstevel@tonic-gate  *    Build the openHCI descriptor for a packet or buffer based on info
10847c478bd9Sstevel@tonic-gate  *    currently collected into the working vars struct (wvp).  After some
10857c478bd9Sstevel@tonic-gate  *    checks, this routine dispatches to the appropriate descriptor block
10867c478bd9Sstevel@tonic-gate  *    build (bld) routine for the packet or buf type.
10877c478bd9Sstevel@tonic-gate  */
10887c478bd9Sstevel@tonic-gate static void
10897c478bd9Sstevel@tonic-gate hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
10907c478bd9Sstevel@tonic-gate {
10917c478bd9Sstevel@tonic-gate 	uint16_t ixlopcode;
10927c478bd9Sstevel@tonic-gate 	uint16_t ixlopraw;
10937c478bd9Sstevel@tonic-gate 
10947c478bd9Sstevel@tonic-gate 	/* extract opcode from current IXL cmd (if any) */
10957c478bd9Sstevel@tonic-gate 	if (wvp->ixl_cur_cmdp != NULL) {
10967c478bd9Sstevel@tonic-gate 		ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
10977c478bd9Sstevel@tonic-gate 		ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
10987c478bd9Sstevel@tonic-gate 	} else {
10997c478bd9Sstevel@tonic-gate 		ixlopcode = ixlopraw = IXL1394_OP_INVALID;
11007c478bd9Sstevel@tonic-gate 	}
11017c478bd9Sstevel@tonic-gate 
11027c478bd9Sstevel@tonic-gate 	/*
11037c478bd9Sstevel@tonic-gate 	 * if no xfer descriptor block being built, perform validity checks
11047c478bd9Sstevel@tonic-gate 	 */
11057c478bd9Sstevel@tonic-gate 	if (wvp->xfer_state == XFER_NONE) {
11067c478bd9Sstevel@tonic-gate 		/*
11077c478bd9Sstevel@tonic-gate 		 * error if being finalized by IXL1394_OP_LABEL or
11087c478bd9Sstevel@tonic-gate 		 * IXL1394_OP_JUMP or if at end, and have an unapplied
11097c478bd9Sstevel@tonic-gate 		 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
11107c478bd9Sstevel@tonic-gate 		 * IXL1394_OP_SET_SYNCWAIT
11117c478bd9Sstevel@tonic-gate 		 */
11127c478bd9Sstevel@tonic-gate 		if ((ixlopraw == IXL1394_OP_JUMP) ||
11137c478bd9Sstevel@tonic-gate 		    (ixlopraw == IXL1394_OP_LABEL) ||
11147c478bd9Sstevel@tonic-gate 		    (wvp->ixl_cur_cmdp == NULL) ||
11157c478bd9Sstevel@tonic-gate 		    (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
11167c478bd9Sstevel@tonic-gate 			if ((wvp->ixl_settagsync_cmdp != NULL) ||
11177c478bd9Sstevel@tonic-gate 			    (wvp->ixl_setskipmode_cmdp != NULL) ||
11187c478bd9Sstevel@tonic-gate 			    (wvp->ixl_setsyncwait_cnt != 0)) {
11197c478bd9Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
11207c478bd9Sstevel@tonic-gate 				return;
11217c478bd9Sstevel@tonic-gate 			}
11227c478bd9Sstevel@tonic-gate 		}
11237c478bd9Sstevel@tonic-gate 
11247c478bd9Sstevel@tonic-gate 		/* error if finalize is due to updateable jump cmd */
11257c478bd9Sstevel@tonic-gate 		if (ixlopcode == IXL1394_OP_JUMP_U) {
11267c478bd9Sstevel@tonic-gate 			wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
11277c478bd9Sstevel@tonic-gate 			return;
11287c478bd9Sstevel@tonic-gate 		}
11297c478bd9Sstevel@tonic-gate 
11307c478bd9Sstevel@tonic-gate 		/* no error, no xfer */
11317c478bd9Sstevel@tonic-gate 		return;
11327c478bd9Sstevel@tonic-gate 	}
11337c478bd9Sstevel@tonic-gate 
11347c478bd9Sstevel@tonic-gate 	/*
11357c478bd9Sstevel@tonic-gate 	 * finalize current xfer descriptor block being built
11367c478bd9Sstevel@tonic-gate 	 */
11377c478bd9Sstevel@tonic-gate 
11387c478bd9Sstevel@tonic-gate 	/* count IXL xfer start command for descriptor block being built */
11397c478bd9Sstevel@tonic-gate 	wvp->ixl_xfer_st_cnt++;
11407c478bd9Sstevel@tonic-gate 
11417c478bd9Sstevel@tonic-gate 	/*
11427c478bd9Sstevel@tonic-gate 	 * complete setting of cache flush evaluation flags; flags will already
11437c478bd9Sstevel@tonic-gate 	 * have been set by updateable set cmds and non-start xfer pkt cmds
11447c478bd9Sstevel@tonic-gate 	 */
11457c478bd9Sstevel@tonic-gate 	/* now set cache flush flag if current xfer start cmd is updateable */
11467c478bd9Sstevel@tonic-gate 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
11477c478bd9Sstevel@tonic-gate 		wvp->xfer_hci_flush |= UPDATEABLE_XFER;
11487c478bd9Sstevel@tonic-gate 	}
11497c478bd9Sstevel@tonic-gate 	/*
11507c478bd9Sstevel@tonic-gate 	 * also set cache flush flag if xfer being finalized by
11517c478bd9Sstevel@tonic-gate 	 * updateable jump cmd
11527c478bd9Sstevel@tonic-gate 	 */
11537c478bd9Sstevel@tonic-gate 	if (ixlopcode == IXL1394_OP_JUMP_U) {
11547c478bd9Sstevel@tonic-gate 		wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
11557c478bd9Sstevel@tonic-gate 	}
11567c478bd9Sstevel@tonic-gate 
11577c478bd9Sstevel@tonic-gate 	/*
11587c478bd9Sstevel@tonic-gate 	 * Determine if cache flush required before building next descriptor
11597c478bd9Sstevel@tonic-gate 	 * block. If xfer pkt command and any cache flush flags are set,
11607c478bd9Sstevel@tonic-gate 	 * hci flush needed.
11617c478bd9Sstevel@tonic-gate 	 * If buffer or special xfer command and xfer command is updateable or
11627c478bd9Sstevel@tonic-gate 	 * an associated set command is updateable, hci flush is required now.
11637c478bd9Sstevel@tonic-gate 	 * If a single-xfer buffer or special xfer command is finalized by
11647c478bd9Sstevel@tonic-gate 	 * updateable jump command, hci flush is required now.
11657c478bd9Sstevel@tonic-gate 	 * Note: a cache flush will be required later, before the last
11667c478bd9Sstevel@tonic-gate 	 * descriptor block of a multi-xfer set of descriptor blocks is built,
11677c478bd9Sstevel@tonic-gate 	 * if this (non-pkt) xfer is finalized by an updateable jump command.
11687c478bd9Sstevel@tonic-gate 	 */
11697c478bd9Sstevel@tonic-gate 	if (wvp->xfer_hci_flush != 0) {
11707c478bd9Sstevel@tonic-gate 		if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
11717c478bd9Sstevel@tonic-gate 		    IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1172ffc2b7d4SToomas Soome 		    (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1173ffc2b7d4SToomas Soome 		    0)) {
11747c478bd9Sstevel@tonic-gate 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
11757c478bd9Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
11767c478bd9Sstevel@tonic-gate 				return;
11777c478bd9Sstevel@tonic-gate 			}
11787c478bd9Sstevel@tonic-gate 		}
11797c478bd9Sstevel@tonic-gate 	}
11807c478bd9Sstevel@tonic-gate 
11817c478bd9Sstevel@tonic-gate 	/*
11827c478bd9Sstevel@tonic-gate 	 * determine which kind of descriptor block to build based on
11837c478bd9Sstevel@tonic-gate 	 * xfer state - hdr only, skip cycle, pkt or buf.
11847c478bd9Sstevel@tonic-gate 	 */
11857c478bd9Sstevel@tonic-gate 	switch (wvp->xfer_state) {
11867c478bd9Sstevel@tonic-gate 
11877c478bd9Sstevel@tonic-gate 	case XFER_PKT:
11887c478bd9Sstevel@tonic-gate 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
11897c478bd9Sstevel@tonic-gate 			hci1394_bld_recv_pkt_desc(wvp);
11907c478bd9Sstevel@tonic-gate 		} else {
11917c478bd9Sstevel@tonic-gate 			hci1394_bld_xmit_pkt_desc(wvp);
11927c478bd9Sstevel@tonic-gate 		}
11937c478bd9Sstevel@tonic-gate 		break;
11947c478bd9Sstevel@tonic-gate 
11957c478bd9Sstevel@tonic-gate 	case XFER_BUF:
11967c478bd9Sstevel@tonic-gate 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
11977c478bd9Sstevel@tonic-gate 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
11987c478bd9Sstevel@tonic-gate 				hci1394_bld_recv_buf_fill_desc(wvp);
11997c478bd9Sstevel@tonic-gate 			} else {
12007c478bd9Sstevel@tonic-gate 				hci1394_bld_recv_buf_ppb_desc(wvp);
12017c478bd9Sstevel@tonic-gate 			}
12027c478bd9Sstevel@tonic-gate 		} else {
12037c478bd9Sstevel@tonic-gate 			hci1394_bld_xmit_buf_desc(wvp);
12047c478bd9Sstevel@tonic-gate 		}
12057c478bd9Sstevel@tonic-gate 		break;
12067c478bd9Sstevel@tonic-gate 
12077c478bd9Sstevel@tonic-gate 	case XMIT_HDRONLY:
12087c478bd9Sstevel@tonic-gate 	case XMIT_NOPKT:
12097c478bd9Sstevel@tonic-gate 		hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
12107c478bd9Sstevel@tonic-gate 		break;
12117c478bd9Sstevel@tonic-gate 
12127c478bd9Sstevel@tonic-gate 	default:
12137c478bd9Sstevel@tonic-gate 		/* internal compiler error */
12147c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
12157c478bd9Sstevel@tonic-gate 	}
12167c478bd9Sstevel@tonic-gate 
12177c478bd9Sstevel@tonic-gate 	/* return if error */
12187c478bd9Sstevel@tonic-gate 	if (wvp->dma_bld_error != 0) {
12197c478bd9Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
12207c478bd9Sstevel@tonic-gate 		return;
12217c478bd9Sstevel@tonic-gate 	}
12227c478bd9Sstevel@tonic-gate 
12237c478bd9Sstevel@tonic-gate 	/*
12247c478bd9Sstevel@tonic-gate 	 * if was finalizing IXL jump cmd, set compiler_privatep to
12257c478bd9Sstevel@tonic-gate 	 * cur xfer IXL cmd
12267c478bd9Sstevel@tonic-gate 	 */
12277c478bd9Sstevel@tonic-gate 	if (ixlopraw == IXL1394_OP_JUMP) {
12287c478bd9Sstevel@tonic-gate 		wvp->ixl_cur_cmdp->compiler_privatep =
12297c478bd9Sstevel@tonic-gate 		    (void *)wvp->ixl_cur_xfer_stp;
12307c478bd9Sstevel@tonic-gate 	}
12317c478bd9Sstevel@tonic-gate 
12327c478bd9Sstevel@tonic-gate 	/* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
12337c478bd9Sstevel@tonic-gate 	if (wvp->ixl_cur_labelp != NULL) {
12347c478bd9Sstevel@tonic-gate 		((hci1394_xfer_ctl_t *)
12357c478bd9Sstevel@tonic-gate 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
12367c478bd9Sstevel@tonic-gate 		    XCTL_LABELLED;
12377c478bd9Sstevel@tonic-gate 		wvp->ixl_cur_labelp = NULL;
12387c478bd9Sstevel@tonic-gate 	}
12397c478bd9Sstevel@tonic-gate 
12407c478bd9Sstevel@tonic-gate 	/*
12417c478bd9Sstevel@tonic-gate 	 * set any associated IXL set skipmode cmd into xfer_ctl of
12427c478bd9Sstevel@tonic-gate 	 * cur xfer IXL cmd
12437c478bd9Sstevel@tonic-gate 	 */
12447c478bd9Sstevel@tonic-gate 	if (wvp->ixl_setskipmode_cmdp != NULL) {
12457c478bd9Sstevel@tonic-gate 		((hci1394_xfer_ctl_t *)
12467c478bd9Sstevel@tonic-gate 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
12477c478bd9Sstevel@tonic-gate 		    wvp->ixl_setskipmode_cmdp;
12487c478bd9Sstevel@tonic-gate 	}
12497c478bd9Sstevel@tonic-gate 
12507c478bd9Sstevel@tonic-gate 	/* set no current xfer start cmd */
12517c478bd9Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp = NULL;
12527c478bd9Sstevel@tonic-gate 
12537c478bd9Sstevel@tonic-gate 	/* set no current set tag&sync, set skipmode or set syncwait commands */
12547c478bd9Sstevel@tonic-gate 	wvp->ixl_settagsync_cmdp = NULL;
12557c478bd9Sstevel@tonic-gate 	wvp->ixl_setskipmode_cmdp = NULL;
12567c478bd9Sstevel@tonic-gate 	wvp->ixl_setsyncwait_cnt = 0;
12577c478bd9Sstevel@tonic-gate 
12587c478bd9Sstevel@tonic-gate 	/* set no currently active descriptor blocks */
12597c478bd9Sstevel@tonic-gate 	wvp->descriptors = 0;
12607c478bd9Sstevel@tonic-gate 
12617c478bd9Sstevel@tonic-gate 	/* reset total packet length and buffers count */
12627c478bd9Sstevel@tonic-gate 	wvp->xfer_pktlen = 0;
12637c478bd9Sstevel@tonic-gate 	wvp->xfer_bufcnt = 0;
12647c478bd9Sstevel@tonic-gate 
12657c478bd9Sstevel@tonic-gate 	/* reset flush cache evaluation flags */
12667c478bd9Sstevel@tonic-gate 	wvp->xfer_hci_flush = 0;
12677c478bd9Sstevel@tonic-gate 
12687c478bd9Sstevel@tonic-gate 	/* set no xmit descriptor block being built */
12697c478bd9Sstevel@tonic-gate 	wvp->xfer_state = XFER_NONE;
12707c478bd9Sstevel@tonic-gate }
12717c478bd9Sstevel@tonic-gate 
12727c478bd9Sstevel@tonic-gate /*
12737c478bd9Sstevel@tonic-gate  * hci1394_bld_recv_pkt_desc()
12747c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive packet.
12757c478bd9Sstevel@tonic-gate  */
12767c478bd9Sstevel@tonic-gate static void
12777c478bd9Sstevel@tonic-gate hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
12787c478bd9Sstevel@tonic-gate {
12797c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
12807c478bd9Sstevel@tonic-gate 	caddr_t			dma_descp;
12817c478bd9Sstevel@tonic-gate 	uint32_t		dma_desc_bound;
12827c478bd9Sstevel@tonic-gate 	uint32_t		wait_for_sync;
12837c478bd9Sstevel@tonic-gate 	uint32_t		ii;
12847c478bd9Sstevel@tonic-gate 	hci1394_desc_t		*wv_descp;	/* shorthand to local descrpt */
12857c478bd9Sstevel@tonic-gate 
12867c478bd9Sstevel@tonic-gate 	/*
12877c478bd9Sstevel@tonic-gate 	 * it is an error if the number of descriptors to be built exceeds
12887c478bd9Sstevel@tonic-gate 	 * the maximum descriptors allowed in a descriptor block.
12897c478bd9Sstevel@tonic-gate 	 */
12907c478bd9Sstevel@tonic-gate 	if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
12917c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
12927c478bd9Sstevel@tonic-gate 		return;
12937c478bd9Sstevel@tonic-gate 	}
12947c478bd9Sstevel@tonic-gate 
12957c478bd9Sstevel@tonic-gate 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
12967c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
12977c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
12987c478bd9Sstevel@tonic-gate 		return;
12997c478bd9Sstevel@tonic-gate 	}
13007c478bd9Sstevel@tonic-gate 
13017c478bd9Sstevel@tonic-gate 	/*
13027c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
13037c478bd9Sstevel@tonic-gate 	 * current IXL xfer cmd
13047c478bd9Sstevel@tonic-gate 	 */
13057c478bd9Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
13067c478bd9Sstevel@tonic-gate 
13077c478bd9Sstevel@tonic-gate 	/*
13087c478bd9Sstevel@tonic-gate 	 * if enabled, set wait for sync flag in first descriptor of
13097c478bd9Sstevel@tonic-gate 	 * descriptor block
13107c478bd9Sstevel@tonic-gate 	 */
13117c478bd9Sstevel@tonic-gate 	if (wvp->ixl_setsyncwait_cnt > 0) {
13127c478bd9Sstevel@tonic-gate 		wvp->ixl_setsyncwait_cnt = 1;
13137c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_ENBL;
13147c478bd9Sstevel@tonic-gate 	} else {
13157c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_DSABL;
13167c478bd9Sstevel@tonic-gate 	}
13177c478bd9Sstevel@tonic-gate 
13187c478bd9Sstevel@tonic-gate 	/* create descriptor block for this recv packet (xfer status enabled) */
13197c478bd9Sstevel@tonic-gate 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
13207c478bd9Sstevel@tonic-gate 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
13217c478bd9Sstevel@tonic-gate 
13227c478bd9Sstevel@tonic-gate 		if (ii == (wvp->xfer_bufcnt - 1)) {
13237c478bd9Sstevel@tonic-gate 			HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
13247c478bd9Sstevel@tonic-gate 			    DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
13257c478bd9Sstevel@tonic-gate 		} else {
13267c478bd9Sstevel@tonic-gate 			HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
13277c478bd9Sstevel@tonic-gate 			    wvp->xfer_size[ii]);
13287c478bd9Sstevel@tonic-gate 		}
13297c478bd9Sstevel@tonic-gate 		wv_descp->data_addr = wvp->xfer_bufp[ii];
13307c478bd9Sstevel@tonic-gate 		wv_descp->branch = 0;
13317c478bd9Sstevel@tonic-gate 		wv_descp->status = (wvp->xfer_size[ii] <<
13327c478bd9Sstevel@tonic-gate 		    DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
13337c478bd9Sstevel@tonic-gate 		wvp->descriptors++;
13347c478bd9Sstevel@tonic-gate 	}
13357c478bd9Sstevel@tonic-gate 
13367c478bd9Sstevel@tonic-gate 	/* allocate and copy descriptor block to dma memory */
13377c478bd9Sstevel@tonic-gate 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
13387c478bd9Sstevel@tonic-gate 	    DDI_SUCCESS) {
13397c478bd9Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above function call */
13407c478bd9Sstevel@tonic-gate 		return;
13417c478bd9Sstevel@tonic-gate 	}
13427c478bd9Sstevel@tonic-gate 
13437c478bd9Sstevel@tonic-gate 	/*
13447c478bd9Sstevel@tonic-gate 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
13457c478bd9Sstevel@tonic-gate 	 * is last component)
13467c478bd9Sstevel@tonic-gate 	 */
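	/*
	 * (dma_descp is the last of the xfer_bufcnt descriptors, i.e. at
	 * offset xfer_bufcnt - 1 from the start of the block)
	 */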
13477c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_bound = dma_desc_bound;
13487c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_descp =
13497c478bd9Sstevel@tonic-gate 	    dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
13507c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
13517c478bd9Sstevel@tonic-gate }
13527c478bd9Sstevel@tonic-gate 
13537c478bd9Sstevel@tonic-gate /*
13547c478bd9Sstevel@tonic-gate  * hci1394_bld_recv_buf_ppb_desc()
13557c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive buf
13567c478bd9Sstevel@tonic-gate  *    in packet per buffer mode.
13577c478bd9Sstevel@tonic-gate  */
13587c478bd9Sstevel@tonic-gate static void
13597c478bd9Sstevel@tonic-gate hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
13607c478bd9Sstevel@tonic-gate {
13617c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
13627c478bd9Sstevel@tonic-gate 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
13637c478bd9Sstevel@tonic-gate 	caddr_t		dma_descp;
13647c478bd9Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
13657c478bd9Sstevel@tonic-gate 	uint32_t	pktsize;
13667c478bd9Sstevel@tonic-gate 	uint32_t	pktcnt;
13677c478bd9Sstevel@tonic-gate 	uint32_t	wait_for_sync;
13687c478bd9Sstevel@tonic-gate 	uint32_t	ii;
13697c478bd9Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
13707c478bd9Sstevel@tonic-gate 
13717c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
13727c478bd9Sstevel@tonic-gate 
13737c478bd9Sstevel@tonic-gate 	/* determine number and size of pkt desc blocks to create */
13747c478bd9Sstevel@tonic-gate 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
13757c478bd9Sstevel@tonic-gate 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
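	/*
	 * Note: pkt_size is assumed non-zero and to evenly divide size;
	 * the IXL command is expected to have been validated earlier.
	 */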
13767c478bd9Sstevel@tonic-gate 
13777c478bd9Sstevel@tonic-gate 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
13787c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
13797c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
13807c478bd9Sstevel@tonic-gate 		return;
13817c478bd9Sstevel@tonic-gate 	}
13827c478bd9Sstevel@tonic-gate 
13837c478bd9Sstevel@tonic-gate 	/*
13847c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
13857c478bd9Sstevel@tonic-gate 	 * current IXL xfer cmd
13867c478bd9Sstevel@tonic-gate 	 */
13877c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
13887c478bd9Sstevel@tonic-gate 
13897c478bd9Sstevel@tonic-gate 	/*
13907c478bd9Sstevel@tonic-gate 	 * if enabled, set wait for sync flag in first descriptor in
13917c478bd9Sstevel@tonic-gate 	 * descriptor block
13927c478bd9Sstevel@tonic-gate 	 */
13937c478bd9Sstevel@tonic-gate 	if (wvp->ixl_setsyncwait_cnt > 0) {
13947c478bd9Sstevel@tonic-gate 		wvp->ixl_setsyncwait_cnt = 1;
13957c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_ENBL;
13967c478bd9Sstevel@tonic-gate 	} else {
13977c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_DSABL;
13987c478bd9Sstevel@tonic-gate 	}
13997c478bd9Sstevel@tonic-gate 
14007c478bd9Sstevel@tonic-gate 	/* create first descriptor block for this recv packet */
14017c478bd9Sstevel@tonic-gate 	/* consists of one descriptor and xfer status is enabled */
14027c478bd9Sstevel@tonic-gate 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
14037c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
14047c478bd9Sstevel@tonic-gate 	    wait_for_sync, pktsize);
14057c478bd9Sstevel@tonic-gate 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
14067c478bd9Sstevel@tonic-gate 	wv_descp->branch = 0;
14077c478bd9Sstevel@tonic-gate 	wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
14087c478bd9Sstevel@tonic-gate 	    DESC_ST_RESCOUNT_MASK;
14097c478bd9Sstevel@tonic-gate 	wvp->descriptors++;
14107c478bd9Sstevel@tonic-gate 
14117c478bd9Sstevel@tonic-gate 	/*
14127c478bd9Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as there are
14137c478bd9Sstevel@tonic-gate 	 * recv pkts
14147c478bd9Sstevel@tonic-gate 	 */
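	/*
	 * Note: the descriptor block template built above is copied to dma
	 * memory once per packet; only its data_addr is advanced between
	 * copies (at the bottom of this loop).
	 */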
14157c478bd9Sstevel@tonic-gate 	for (ii = 0; ii < pktcnt; ii++) {
14167c478bd9Sstevel@tonic-gate 
14177c478bd9Sstevel@tonic-gate 		/* if about to create last descriptor block */
14187c478bd9Sstevel@tonic-gate 		if (ii == (pktcnt - 1)) {
14197c478bd9Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
14207c478bd9Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
14217c478bd9Sstevel@tonic-gate 			    DDI_SUCCESS) {
14227c478bd9Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
14237c478bd9Sstevel@tonic-gate 				return;
14247c478bd9Sstevel@tonic-gate 			}
14257c478bd9Sstevel@tonic-gate 		}
14267c478bd9Sstevel@tonic-gate 
14277c478bd9Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
14287c478bd9Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
14297c478bd9Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
14307c478bd9Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
14317c478bd9Sstevel@tonic-gate 			return;
14327c478bd9Sstevel@tonic-gate 		}
14337c478bd9Sstevel@tonic-gate 
14347c478bd9Sstevel@tonic-gate 		/*
14357c478bd9Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl struct (unbound addr (kernel
14367c478bd9Sstevel@tonic-gate 		 * virtual) is last component (descriptor))
14377c478bd9Sstevel@tonic-gate 		 */
14387c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
14397c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp;
14407c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
14417c478bd9Sstevel@tonic-gate 
14427c478bd9Sstevel@tonic-gate 		/* advance buffer ptr by pktsize in descriptor block */
14437c478bd9Sstevel@tonic-gate 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
14447c478bd9Sstevel@tonic-gate 		    pktsize;
14457c478bd9Sstevel@tonic-gate 	}
14467c478bd9Sstevel@tonic-gate }
14477c478bd9Sstevel@tonic-gate 
14487c478bd9Sstevel@tonic-gate /*
14497c478bd9Sstevel@tonic-gate  * hci1394_bld_recv_buf_fill_desc()
14507c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a receive buf
14517c478bd9Sstevel@tonic-gate  *    in buffer fill mode.
14527c478bd9Sstevel@tonic-gate  */
14537c478bd9Sstevel@tonic-gate static void
14547c478bd9Sstevel@tonic-gate hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
14557c478bd9Sstevel@tonic-gate {
14567c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
14577c478bd9Sstevel@tonic-gate 	caddr_t			dma_descp;
14587c478bd9Sstevel@tonic-gate 	uint32_t		dma_desc_bound;
14597c478bd9Sstevel@tonic-gate 	uint32_t		wait_for_sync;
14607c478bd9Sstevel@tonic-gate 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
14617c478bd9Sstevel@tonic-gate 
14627c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
14637c478bd9Sstevel@tonic-gate 
14657c478bd9Sstevel@tonic-gate 	/* allocate an xfer_ctl struct including 1 xfer_ctl_dma struct */
14667c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
14677c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
14687c478bd9Sstevel@tonic-gate 		return;
14697c478bd9Sstevel@tonic-gate 	}
14707c478bd9Sstevel@tonic-gate 
14717c478bd9Sstevel@tonic-gate 	/*
14727c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
14737c478bd9Sstevel@tonic-gate 	 * current IXL xfer cmd
14747c478bd9Sstevel@tonic-gate 	 */
14757c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
14767c478bd9Sstevel@tonic-gate 
14777c478bd9Sstevel@tonic-gate 	/*
14787c478bd9Sstevel@tonic-gate 	 * if enabled, set wait for sync flag in first descriptor of
14797c478bd9Sstevel@tonic-gate 	 * descriptor block
14807c478bd9Sstevel@tonic-gate 	 */
14817c478bd9Sstevel@tonic-gate 	if (wvp->ixl_setsyncwait_cnt > 0) {
14827c478bd9Sstevel@tonic-gate 		wvp->ixl_setsyncwait_cnt = 1;
14837c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_ENBL;
14847c478bd9Sstevel@tonic-gate 	} else {
14857c478bd9Sstevel@tonic-gate 		wait_for_sync = DESC_W_DSABL;
14867c478bd9Sstevel@tonic-gate 	}
14877c478bd9Sstevel@tonic-gate 
14887c478bd9Sstevel@tonic-gate 	/*
14897c478bd9Sstevel@tonic-gate 	 * create descriptor block for this buffer fill mode recv command which
14907c478bd9Sstevel@tonic-gate 	 * consists of one descriptor with xfer status enabled
14917c478bd9Sstevel@tonic-gate 	 */
14927c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
14937c478bd9Sstevel@tonic-gate 	    DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
14947c478bd9Sstevel@tonic-gate 
14957c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors].data_addr =
14967c478bd9Sstevel@tonic-gate 	    local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
14977c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors].branch = 0;
14987c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors].status =
14997c478bd9Sstevel@tonic-gate 	    (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
15007c478bd9Sstevel@tonic-gate 	    DESC_ST_RESCOUNT_MASK;
15017c478bd9Sstevel@tonic-gate 	wvp->descriptors++;
15027c478bd9Sstevel@tonic-gate 
15037c478bd9Sstevel@tonic-gate 	/* check and perform any required hci cache flush */
15047c478bd9Sstevel@tonic-gate 	if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
15057c478bd9Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
15067c478bd9Sstevel@tonic-gate 		return;
15077c478bd9Sstevel@tonic-gate 	}
15087c478bd9Sstevel@tonic-gate 
15097c478bd9Sstevel@tonic-gate 	/* allocate and copy descriptor block to dma memory */
15107c478bd9Sstevel@tonic-gate 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
15117c478bd9Sstevel@tonic-gate 	    != DDI_SUCCESS) {
15127c478bd9Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
15137c478bd9Sstevel@tonic-gate 		return;
15147c478bd9Sstevel@tonic-gate 	}
15157c478bd9Sstevel@tonic-gate 
15167c478bd9Sstevel@tonic-gate 	/*
15177c478bd9Sstevel@tonic-gate 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
15187c478bd9Sstevel@tonic-gate 	 * is last component.
15197c478bd9Sstevel@tonic-gate 	 */
15207c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_bound = dma_desc_bound;
15217c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_descp = dma_descp;
15227c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
15237c478bd9Sstevel@tonic-gate }
15247c478bd9Sstevel@tonic-gate 
15257c478bd9Sstevel@tonic-gate /*
15267c478bd9Sstevel@tonic-gate  * hci1394_bld_xmit_pkt_desc()
15277c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor block(s) for a transmit packet.
15287c478bd9Sstevel@tonic-gate  */
15297c478bd9Sstevel@tonic-gate static void
15307c478bd9Sstevel@tonic-gate hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
15317c478bd9Sstevel@tonic-gate {
15327c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t *xctlp;
15337c478bd9Sstevel@tonic-gate 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
15347c478bd9Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
15357c478bd9Sstevel@tonic-gate 	caddr_t		dma_descp;	/* dma bound memory for descriptor */
15367c478bd9Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
15377c478bd9Sstevel@tonic-gate 	uint32_t	ii;
15387c478bd9Sstevel@tonic-gate 
15397c478bd9Sstevel@tonic-gate 	/*
15407c478bd9Sstevel@tonic-gate 	 * it is an error if the number of descriptors to be built exceeds
15417c478bd9Sstevel@tonic-gate 	 * the maximum allowed in a descriptor block. Add 2 for the overhead
15427c478bd9Sstevel@tonic-gate 	 * of the OMORE-Immediate.
15437c478bd9Sstevel@tonic-gate 	 */
15447c478bd9Sstevel@tonic-gate 	if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
15457c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
15467c478bd9Sstevel@tonic-gate 		return;
15477c478bd9Sstevel@tonic-gate 	}
15487c478bd9Sstevel@tonic-gate 
15497c478bd9Sstevel@tonic-gate 	/* it is an error if the total packet length exceeds 0xFFFF */
15507c478bd9Sstevel@tonic-gate 	if (wvp->xfer_pktlen > 0xFFFF) {
15517c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
15527c478bd9Sstevel@tonic-gate 		return;
15537c478bd9Sstevel@tonic-gate 	}
15547c478bd9Sstevel@tonic-gate 
15557c478bd9Sstevel@tonic-gate 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
15567c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
15577c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
15587c478bd9Sstevel@tonic-gate 		return;
15597c478bd9Sstevel@tonic-gate 	}
15607c478bd9Sstevel@tonic-gate 
15617c478bd9Sstevel@tonic-gate 	/*
15627c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
15637c478bd9Sstevel@tonic-gate 	 * current IXL xfer cmd
15647c478bd9Sstevel@tonic-gate 	 */
15657c478bd9Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
15667c478bd9Sstevel@tonic-gate 
15677c478bd9Sstevel@tonic-gate 	/* generate values for the xmit pkt hdrs */
15687c478bd9Sstevel@tonic-gate 	hci1394_set_xmit_pkt_hdr(wvp);
15697c478bd9Sstevel@tonic-gate 
15707c478bd9Sstevel@tonic-gate 	/*
15717c478bd9Sstevel@tonic-gate 	 * xmit pkt starts with an output more immediate,
15727c478bd9Sstevel@tonic-gate 	 * a double sized hci1394_desc
15737c478bd9Sstevel@tonic-gate 	 */
15747c478bd9Sstevel@tonic-gate 	wv_omi_descp = (hci1394_output_more_imm_t *)
15757c478bd9Sstevel@tonic-gate 	    (&wvp->descriptor_block[wvp->descriptors]);
15767c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
15777c478bd9Sstevel@tonic-gate 
15787c478bd9Sstevel@tonic-gate 	wv_omi_descp->data_addr = 0;
15797c478bd9Sstevel@tonic-gate 	wv_omi_descp->branch = 0;
15807c478bd9Sstevel@tonic-gate 	wv_omi_descp->status = 0;
15817c478bd9Sstevel@tonic-gate 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
15827c478bd9Sstevel@tonic-gate 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
15837c478bd9Sstevel@tonic-gate 	wv_omi_descp->q3 = 0;
15847c478bd9Sstevel@tonic-gate 	wv_omi_descp->q4 = 0;
15857c478bd9Sstevel@tonic-gate 
15867c478bd9Sstevel@tonic-gate 	wvp->descriptors += 2;
15877c478bd9Sstevel@tonic-gate 
15887c478bd9Sstevel@tonic-gate 	/*
15897c478bd9Sstevel@tonic-gate 	 * create an output more hci1394_desc descriptor for each buffer but
15907c478bd9Sstevel@tonic-gate 	 * the last, then an output last descriptor with xfer status enabled
15917c478bd9Sstevel@tonic-gate 	 */
15927c478bd9Sstevel@tonic-gate 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
15937c478bd9Sstevel@tonic-gate 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
15947c478bd9Sstevel@tonic-gate 
15957c478bd9Sstevel@tonic-gate 		if (ii == (wvp->xfer_bufcnt - 1)) {
15967c478bd9Sstevel@tonic-gate 			HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
15977c478bd9Sstevel@tonic-gate 			    DESC_INTR_DSABL, wvp->xfer_size[ii]);
15987c478bd9Sstevel@tonic-gate 		} else {
15997c478bd9Sstevel@tonic-gate 			HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
16007c478bd9Sstevel@tonic-gate 		}
16017c478bd9Sstevel@tonic-gate 		wv_descp->data_addr = wvp->xfer_bufp[ii];
16027c478bd9Sstevel@tonic-gate 		wv_descp->branch = 0;
16037c478bd9Sstevel@tonic-gate 		wv_descp->status = 0;
16047c478bd9Sstevel@tonic-gate 		wvp->descriptors++;
16057c478bd9Sstevel@tonic-gate 	}
16067c478bd9Sstevel@tonic-gate 
16077c478bd9Sstevel@tonic-gate 	/* allocate and copy descriptor block to dma memory */
16087c478bd9Sstevel@tonic-gate 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
16097c478bd9Sstevel@tonic-gate 	    DDI_SUCCESS) {
16107c478bd9Sstevel@tonic-gate 		/* wvp->dma_bld_error is set by above call */
16117c478bd9Sstevel@tonic-gate 		return;
16127c478bd9Sstevel@tonic-gate 	}
16137c478bd9Sstevel@tonic-gate 
16147c478bd9Sstevel@tonic-gate 	/*
16157c478bd9Sstevel@tonic-gate 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
16167c478bd9Sstevel@tonic-gate 	 * is last component (descriptor))
16177c478bd9Sstevel@tonic-gate 	 */
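	/*
	 * (the OMORE-Immediate occupies 2 descriptor slots, so the last
	 * descriptor sits at offset xfer_bufcnt + 1 within the block)
	 */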
16187c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_bound = dma_desc_bound;
16197c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_descp =
16207c478bd9Sstevel@tonic-gate 	    dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
16217c478bd9Sstevel@tonic-gate 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
16227c478bd9Sstevel@tonic-gate }
16237c478bd9Sstevel@tonic-gate 
16247c478bd9Sstevel@tonic-gate /*
16257c478bd9Sstevel@tonic-gate  * hci1394_bld_xmit_buf_desc()
16267c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor blocks for a transmit buffer.
16277c478bd9Sstevel@tonic-gate  */
16287c478bd9Sstevel@tonic-gate static void
16297c478bd9Sstevel@tonic-gate hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
16307c478bd9Sstevel@tonic-gate {
16317c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
16327c478bd9Sstevel@tonic-gate 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
16337c478bd9Sstevel@tonic-gate 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
16347c478bd9Sstevel@tonic-gate 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
16357c478bd9Sstevel@tonic-gate 	caddr_t		dma_descp;
16367c478bd9Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
16377c478bd9Sstevel@tonic-gate 	uint32_t	pktsize;
16387c478bd9Sstevel@tonic-gate 	uint32_t	pktcnt;
16397c478bd9Sstevel@tonic-gate 	uint32_t	ii;
16407c478bd9Sstevel@tonic-gate 
16417c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
16427c478bd9Sstevel@tonic-gate 
16437c478bd9Sstevel@tonic-gate 	/* determine number and size of pkt desc blocks to create */
16447c478bd9Sstevel@tonic-gate 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
16457c478bd9Sstevel@tonic-gate 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
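	/*
	 * Note: pkt_size is assumed non-zero and to evenly divide size;
	 * the IXL command is expected to have been validated earlier.
	 */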
16467c478bd9Sstevel@tonic-gate 
16477c478bd9Sstevel@tonic-gate 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
16487c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
16497c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
16507c478bd9Sstevel@tonic-gate 		return;
16517c478bd9Sstevel@tonic-gate 	}
16527c478bd9Sstevel@tonic-gate 
16537c478bd9Sstevel@tonic-gate 	/*
16547c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
16557c478bd9Sstevel@tonic-gate 	 * current IXL xfer cmd
16567c478bd9Sstevel@tonic-gate 	 */
16577c478bd9Sstevel@tonic-gate 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
16587c478bd9Sstevel@tonic-gate 
16597c478bd9Sstevel@tonic-gate 	/* generate values for the xmit pkt hdrs */
16607c478bd9Sstevel@tonic-gate 	wvp->xfer_pktlen = pktsize;
16617c478bd9Sstevel@tonic-gate 	hci1394_set_xmit_pkt_hdr(wvp);
16627c478bd9Sstevel@tonic-gate 
16637c478bd9Sstevel@tonic-gate 	/*
16647c478bd9Sstevel@tonic-gate 	 * xmit pkt starts with an output more immediate,
16657c478bd9Sstevel@tonic-gate 	 * a double sized hci1394_desc
16667c478bd9Sstevel@tonic-gate 	 */
16677c478bd9Sstevel@tonic-gate 	wv_omi_descp = (hci1394_output_more_imm_t *)
16687c478bd9Sstevel@tonic-gate 	    &wvp->descriptor_block[wvp->descriptors];
16697c478bd9Sstevel@tonic-gate 
16707c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
16717c478bd9Sstevel@tonic-gate 
16727c478bd9Sstevel@tonic-gate 	wv_omi_descp->data_addr = 0;
16737c478bd9Sstevel@tonic-gate 	wv_omi_descp->branch = 0;
16747c478bd9Sstevel@tonic-gate 	wv_omi_descp->status = 0;
16757c478bd9Sstevel@tonic-gate 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
16767c478bd9Sstevel@tonic-gate 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
16777c478bd9Sstevel@tonic-gate 	wv_omi_descp->q3 = 0;
16787c478bd9Sstevel@tonic-gate 	wv_omi_descp->q4 = 0;
16797c478bd9Sstevel@tonic-gate 
16807c478bd9Sstevel@tonic-gate 	wvp->descriptors += 2;
16817c478bd9Sstevel@tonic-gate 
16827c478bd9Sstevel@tonic-gate 	/* follow with a single output last descriptor w/status enabled */
16837c478bd9Sstevel@tonic-gate 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
16847c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
16857c478bd9Sstevel@tonic-gate 	    pktsize);
16867c478bd9Sstevel@tonic-gate 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
16877c478bd9Sstevel@tonic-gate 	wv_descp->branch = 0;
16887c478bd9Sstevel@tonic-gate 	wv_descp->status = 0;
16897c478bd9Sstevel@tonic-gate 	wvp->descriptors++;
16907c478bd9Sstevel@tonic-gate 
16917c478bd9Sstevel@tonic-gate 	/*
16927c478bd9Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as there are
16937c478bd9Sstevel@tonic-gate 	 * xmit packets
16947c478bd9Sstevel@tonic-gate 	 */
16957c478bd9Sstevel@tonic-gate 	for (ii = 0; ii < pktcnt; ii++) {
16967c478bd9Sstevel@tonic-gate 
16977c478bd9Sstevel@tonic-gate 		/* if about to create last descriptor block */
16987c478bd9Sstevel@tonic-gate 		if (ii == (pktcnt - 1)) {
16997c478bd9Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
17007c478bd9Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
17017c478bd9Sstevel@tonic-gate 			    DDI_SUCCESS) {
17027c478bd9Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
17037c478bd9Sstevel@tonic-gate 				return;
17047c478bd9Sstevel@tonic-gate 			}
17057c478bd9Sstevel@tonic-gate 		}
17067c478bd9Sstevel@tonic-gate 
17077c478bd9Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
17087c478bd9Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
17097c478bd9Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
17107c478bd9Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
17117c478bd9Sstevel@tonic-gate 			return;
17127c478bd9Sstevel@tonic-gate 		}
17137c478bd9Sstevel@tonic-gate 
17147c478bd9Sstevel@tonic-gate 		/*
17157c478bd9Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl structure (unbound addr
17167c478bd9Sstevel@tonic-gate 		 * (kernel virtual) is last component (descriptor))
17177c478bd9Sstevel@tonic-gate 		 */
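		/* (the OLAST sits 2 descriptor slots past the block start) */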
17187c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
17197c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp + 2 *
17207c478bd9Sstevel@tonic-gate 		    sizeof (hci1394_desc_t);
17217c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
17227c478bd9Sstevel@tonic-gate 
17237c478bd9Sstevel@tonic-gate 		/* advance buffer ptr by pktsize in descriptor block */
17247c478bd9Sstevel@tonic-gate 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
17257c478bd9Sstevel@tonic-gate 		    pktsize;
17267c478bd9Sstevel@tonic-gate 	}
17277c478bd9Sstevel@tonic-gate }
17287c478bd9Sstevel@tonic-gate 
17297c478bd9Sstevel@tonic-gate /*
17307c478bd9Sstevel@tonic-gate  * hci1394_bld_xmit_hdronly_nopkt_desc()
17317c478bd9Sstevel@tonic-gate  *    Used to create the openHCI dma descriptor blocks for transmitting
17327c478bd9Sstevel@tonic-gate  *    a packet consisting of an isochronous header with no data payload,
17337c478bd9Sstevel@tonic-gate  *    or for not sending a packet at all for a cycle.
17347c478bd9Sstevel@tonic-gate  *
17357c478bd9Sstevel@tonic-gate  *    A Store_Value openhci descriptor is built at the start of each
17367c478bd9Sstevel@tonic-gate  *    IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
17377c478bd9Sstevel@tonic-gate  *    descriptor block (to allow for skip cycle specification and set skipmode
17387c478bd9Sstevel@tonic-gate  *    processing for these commands).
17397c478bd9Sstevel@tonic-gate  */
17407c478bd9Sstevel@tonic-gate static void
17417c478bd9Sstevel@tonic-gate hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
17427c478bd9Sstevel@tonic-gate {
17437c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t	*xctlp;
17447c478bd9Sstevel@tonic-gate 	hci1394_output_last_t	*wv_ol_descp; /* shorthand to local descrp */
17457c478bd9Sstevel@tonic-gate 	hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
17467c478bd9Sstevel@tonic-gate 	caddr_t		dma_descp;
17477c478bd9Sstevel@tonic-gate 	uint32_t	dma_desc_bound;
17487c478bd9Sstevel@tonic-gate 	uint32_t	repcnt;
17497c478bd9Sstevel@tonic-gate 	uint32_t	ii;
17507c478bd9Sstevel@tonic-gate 
17517c478bd9Sstevel@tonic-gate 	/* determine # of instances of output hdronly/nopkt to generate */
17527c478bd9Sstevel@tonic-gate 	repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
17537c478bd9Sstevel@tonic-gate 
17547c478bd9Sstevel@tonic-gate 	/*
17557c478bd9Sstevel@tonic-gate 	 * allocate an xfer_ctl structure which includes repcnt
17567c478bd9Sstevel@tonic-gate 	 * xfer_ctl_dma structs
17577c478bd9Sstevel@tonic-gate 	 */
17587c478bd9Sstevel@tonic-gate 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
17607c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
17627c478bd9Sstevel@tonic-gate 		return;
17637c478bd9Sstevel@tonic-gate 	}
17647c478bd9Sstevel@tonic-gate 
17657c478bd9Sstevel@tonic-gate 	/*
17667c478bd9Sstevel@tonic-gate 	 * save xfer_ctl struct addr in compiler_privatep of
17677c478bd9Sstevel@tonic-gate 	 * current IXL xfer command
17687c478bd9Sstevel@tonic-gate 	 */
17697c478bd9Sstevel@tonic-gate 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
17707c478bd9Sstevel@tonic-gate 
17717c478bd9Sstevel@tonic-gate 	/*
17727c478bd9Sstevel@tonic-gate 	 * create a storevalue descriptor
17737c478bd9Sstevel@tonic-gate 	 * (will be used for skip vs jump processing)
17747c478bd9Sstevel@tonic-gate 	 */
17757c478bd9Sstevel@tonic-gate 	hci1394_set_xmit_storevalue_desc(wvp);
17767c478bd9Sstevel@tonic-gate 
17777c478bd9Sstevel@tonic-gate 	/*
17787c478bd9Sstevel@tonic-gate 	 * processing now based on opcode:
17797c478bd9Sstevel@tonic-gate 	 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
17807c478bd9Sstevel@tonic-gate 	 */
17817c478bd9Sstevel@tonic-gate 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
17827c478bd9Sstevel@tonic-gate 	    IXL1394_OP_SEND_HDR_ONLY) {
17837c478bd9Sstevel@tonic-gate 
17847c478bd9Sstevel@tonic-gate 		/* for header only, generate values for the xmit pkt hdrs */
17857c478bd9Sstevel@tonic-gate 		hci1394_set_xmit_pkt_hdr(wvp);
17867c478bd9Sstevel@tonic-gate 
17877c478bd9Sstevel@tonic-gate 		/*
17887c478bd9Sstevel@tonic-gate 		 * create an output last immediate (double sized) descriptor
17897c478bd9Sstevel@tonic-gate 		 * xfer status enabled
17907c478bd9Sstevel@tonic-gate 		 */
17917c478bd9Sstevel@tonic-gate 		wv_oli_descp = (hci1394_output_last_imm_t *)
17927c478bd9Sstevel@tonic-gate 		    &wvp->descriptor_block[wvp->descriptors];
17937c478bd9Sstevel@tonic-gate 
17947c478bd9Sstevel@tonic-gate 		HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
17957c478bd9Sstevel@tonic-gate 		    DESC_INTR_DSABL);
17967c478bd9Sstevel@tonic-gate 
17977c478bd9Sstevel@tonic-gate 		wv_oli_descp->data_addr = 0;
17987c478bd9Sstevel@tonic-gate 		wv_oli_descp->branch = 0;
17997c478bd9Sstevel@tonic-gate 		wv_oli_descp->status = 0;
18007c478bd9Sstevel@tonic-gate 		wv_oli_descp->q1 = wvp->xmit_pkthdr1;
18017c478bd9Sstevel@tonic-gate 		wv_oli_descp->q2 = wvp->xmit_pkthdr2;
18027c478bd9Sstevel@tonic-gate 		wv_oli_descp->q3 = 0;
18037c478bd9Sstevel@tonic-gate 		wv_oli_descp->q4 = 0;
18047c478bd9Sstevel@tonic-gate 		wvp->descriptors += 2;
18057c478bd9Sstevel@tonic-gate 	} else {
18067c478bd9Sstevel@tonic-gate 		/*
18077c478bd9Sstevel@tonic-gate 		 * for skip cycle, create a single output last descriptor
18087c478bd9Sstevel@tonic-gate 		 * with xfer status enabled
18097c478bd9Sstevel@tonic-gate 		 */
18107c478bd9Sstevel@tonic-gate 		wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
18117c478bd9Sstevel@tonic-gate 		HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
18127c478bd9Sstevel@tonic-gate 		    DESC_INTR_DSABL, 0);
18137c478bd9Sstevel@tonic-gate 		wv_ol_descp->data_addr = 0;
18147c478bd9Sstevel@tonic-gate 		wv_ol_descp->branch = 0;
18157c478bd9Sstevel@tonic-gate 		wv_ol_descp->status = 0;
18167c478bd9Sstevel@tonic-gate 		wvp->descriptors++;
18177c478bd9Sstevel@tonic-gate 	}
18187c478bd9Sstevel@tonic-gate 
18197c478bd9Sstevel@tonic-gate 	/*
18207c478bd9Sstevel@tonic-gate 	 * generate as many contiguous descriptor blocks as repeat count
18217c478bd9Sstevel@tonic-gate 	 * indicates
18227c478bd9Sstevel@tonic-gate 	 */
18237c478bd9Sstevel@tonic-gate 	for (ii = 0; ii < repcnt; ii++) {
18247c478bd9Sstevel@tonic-gate 
18257c478bd9Sstevel@tonic-gate 		/* if about to create last descriptor block */
18267c478bd9Sstevel@tonic-gate 		if (ii == (repcnt - 1)) {
18277c478bd9Sstevel@tonic-gate 			/* check and perform any required hci cache flush */
18287c478bd9Sstevel@tonic-gate 			if (hci1394_flush_end_desc_check(wvp, ii) !=
18297c478bd9Sstevel@tonic-gate 			    DDI_SUCCESS) {
18307c478bd9Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
18317c478bd9Sstevel@tonic-gate 				return;
18327c478bd9Sstevel@tonic-gate 			}
18337c478bd9Sstevel@tonic-gate 		}
18347c478bd9Sstevel@tonic-gate 
18357c478bd9Sstevel@tonic-gate 		/* allocate and copy descriptor block to dma memory */
18367c478bd9Sstevel@tonic-gate 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
18377c478bd9Sstevel@tonic-gate 		    &dma_desc_bound) != DDI_SUCCESS) {
18387c478bd9Sstevel@tonic-gate 			/* wvp->dma_bld_error is set by above call */
18397c478bd9Sstevel@tonic-gate 			return;
18407c478bd9Sstevel@tonic-gate 		}
18417c478bd9Sstevel@tonic-gate 
18427c478bd9Sstevel@tonic-gate 		/*
18437c478bd9Sstevel@tonic-gate 		 * set dma addrs into xfer_ctl structure (unbound addr
18447c478bd9Sstevel@tonic-gate 		 * (kernel virtual) is last component (descriptor)
18457c478bd9Sstevel@tonic-gate 		 */
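		/* (last component starts right after the storevalue desc) */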
18467c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_bound = dma_desc_bound;
18477c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
18487c478bd9Sstevel@tonic-gate 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
18497c478bd9Sstevel@tonic-gate 	}
18507c478bd9Sstevel@tonic-gate }
18517c478bd9Sstevel@tonic-gate 
18527c478bd9Sstevel@tonic-gate /*
18537c478bd9Sstevel@tonic-gate  * hci1394_bld_dma_mem_desc_blk()
18547c478bd9Sstevel@tonic-gate  *    Used to put a given OpenHCI descriptor block into dma bound memory.
18557c478bd9Sstevel@tonic-gate  */
18567c478bd9Sstevel@tonic-gate static int
18577c478bd9Sstevel@tonic-gate hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
18587c478bd9Sstevel@tonic-gate     uint32_t *dma_desc_bound)
18597c478bd9Sstevel@tonic-gate {
18607c478bd9Sstevel@tonic-gate 	uint32_t	dma_bound;
18617c478bd9Sstevel@tonic-gate 
18627c478bd9Sstevel@tonic-gate 	/* set internal error if no descriptor blocks to build */
18637c478bd9Sstevel@tonic-gate 	if (wvp->descriptors == 0) {
18647c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
18657c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
18667c478bd9Sstevel@tonic-gate 	}
18677c478bd9Sstevel@tonic-gate 
18687c478bd9Sstevel@tonic-gate 	/* allocate dma memory and move this descriptor block to it */
18697c478bd9Sstevel@tonic-gate 	*dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
18707c478bd9Sstevel@tonic-gate 	    sizeof (hci1394_desc_t), &dma_bound);
18717c478bd9Sstevel@tonic-gate 
18727c478bd9Sstevel@tonic-gate 	if (*dma_descpp == NULL) {
18737c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
18747c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
18757c478bd9Sstevel@tonic-gate 	}
18767c478bd9Sstevel@tonic-gate #ifdef _KERNEL
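	/* repcount for ddi_rep_put32 is in 32-bit words, 4 per descriptor */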
18777c478bd9Sstevel@tonic-gate 	ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
18787c478bd9Sstevel@tonic-gate 	    (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
18797c478bd9Sstevel@tonic-gate 	    wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
18807c478bd9Sstevel@tonic-gate 	    DDI_DEV_AUTOINCR);
18817c478bd9Sstevel@tonic-gate #else
18827c478bd9Sstevel@tonic-gate 	bcopy(wvp->descriptor_block, *dma_descpp,
18837c478bd9Sstevel@tonic-gate 	    wvp->descriptors * sizeof (hci1394_desc_t));
18847c478bd9Sstevel@tonic-gate #endif
18857c478bd9Sstevel@tonic-gate 	/*
18867c478bd9Sstevel@tonic-gate 	 * convert allocated block's memory address to bus address space and
18877c478bd9Sstevel@tonic-gate 	 * include properly set Z bits (descriptor count) in the low 4 bits.
18887c478bd9Sstevel@tonic-gate 	 */
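	/* (blocks are 16-byte aligned, so the low 4 bits are free for Z) */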
18897c478bd9Sstevel@tonic-gate 	*dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
18907c478bd9Sstevel@tonic-gate 
18917c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
18927c478bd9Sstevel@tonic-gate }
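
/*
 * Worked example (a minimal sketch; the helper below is hypothetical and the
 * 0xF mask assumes DESC_Z_MASK covers the low nibble, per OpenHCI's
 * 16-byte-aligned descriptor block addresses): binding a 3-descriptor block
 * at bus address 0x12345670 yields the bound value 0x12345673.
 */
static uint32_t
hci1394_example_pack_z(uint32_t dma_bound, uint32_t desc_cnt)
{
	/* clear any stale Z bits, then OR in the descriptor count */
	return ((dma_bound & ~(uint32_t)0xF) | (desc_cnt & 0xF));
}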
18937c478bd9Sstevel@tonic-gate 
18947c478bd9Sstevel@tonic-gate /*
18957c478bd9Sstevel@tonic-gate  * hci1394_set_xmit_pkt_hdr()
18967c478bd9Sstevel@tonic-gate  *    Compose the 2 quadlets for the xmit packet header.
18977c478bd9Sstevel@tonic-gate  */
18987c478bd9Sstevel@tonic-gate static void
18997c478bd9Sstevel@tonic-gate hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
19007c478bd9Sstevel@tonic-gate {
19017c478bd9Sstevel@tonic-gate 	uint16_t tag;
19027c478bd9Sstevel@tonic-gate 	uint16_t sync;
19037c478bd9Sstevel@tonic-gate 
19047c478bd9Sstevel@tonic-gate 	/*
19057c478bd9Sstevel@tonic-gate 	 * choose tag and sync bits for header either from default values or
19067c478bd9Sstevel@tonic-gate 	 * from currently active set tag and sync IXL command
19077c478bd9Sstevel@tonic-gate 	 * (clear command after use)
19087c478bd9Sstevel@tonic-gate 	 */
19097c478bd9Sstevel@tonic-gate 	if (wvp->ixl_settagsync_cmdp == NULL) {
19107c478bd9Sstevel@tonic-gate 		tag = wvp->default_tag;
19117c478bd9Sstevel@tonic-gate 		sync = wvp->default_sync;
19127c478bd9Sstevel@tonic-gate 	} else {
19137c478bd9Sstevel@tonic-gate 		tag = wvp->ixl_settagsync_cmdp->tag;
19147c478bd9Sstevel@tonic-gate 		sync = wvp->ixl_settagsync_cmdp->sync;
19157c478bd9Sstevel@tonic-gate 		wvp->ixl_settagsync_cmdp = NULL;
19167c478bd9Sstevel@tonic-gate 	}
19177c478bd9Sstevel@tonic-gate 	tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
19187c478bd9Sstevel@tonic-gate 	sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
19197c478bd9Sstevel@tonic-gate 
19207c478bd9Sstevel@tonic-gate 	/*
19217c478bd9Sstevel@tonic-gate 	 * build xmit pkt header -
19227c478bd9Sstevel@tonic-gate 	 * hdr1 has speed, tag, channel number and sync bits
19237c478bd9Sstevel@tonic-gate 	 * hdr2 has the packet length.
19247c478bd9Sstevel@tonic-gate 	 */
19257c478bd9Sstevel@tonic-gate 	wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
19267c478bd9Sstevel@tonic-gate 	    (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
19277c478bd9Sstevel@tonic-gate 	    DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
19287c478bd9Sstevel@tonic-gate 	    DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
19297c478bd9Sstevel@tonic-gate 
19307c478bd9Sstevel@tonic-gate 	wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
19317c478bd9Sstevel@tonic-gate }
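
/*
 * Worked example (assuming the standard OpenHCI IT header layout: sy in bits
 * 0-3, tcode in 4-7, channel in 8-13, tag in 14-15, speed in 16-18, and the
 * data length in bits 16-31 of the second quadlet): speed S400 (2), tag 1,
 * channel 37, tcode 0xA and sync 0 give xmit_pkthdr1 = 0x000265A0, and a
 * 64-byte payload gives xmit_pkthdr2 = 0x00400000.
 */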
19327c478bd9Sstevel@tonic-gate 
19337c478bd9Sstevel@tonic-gate /*
19347c478bd9Sstevel@tonic-gate  * hci1394_set_xmit_skip_mode()
19357c478bd9Sstevel@tonic-gate  *    Set current skip mode from default or from currently active command.
19367c478bd9Sstevel@tonic-gate  *    If the active set skipmode command's mode is skip to label, find the
19377c478bd9Sstevel@tonic-gate  *    xfer start IXL command which follows the label and store it into the
19387c478bd9Sstevel@tonic-gate  *    set skipmode IXL command's compiler_privatep.
19397c478bd9Sstevel@tonic-gate  */
19407c478bd9Sstevel@tonic-gate static void
19417c478bd9Sstevel@tonic-gate hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
19427c478bd9Sstevel@tonic-gate {
19437c478bd9Sstevel@tonic-gate 	int err;
19447c478bd9Sstevel@tonic-gate 
19457c478bd9Sstevel@tonic-gate 	if (wvp->ixl_setskipmode_cmdp == NULL) {
19467c478bd9Sstevel@tonic-gate 		wvp->skipmode = wvp->default_skipmode;
19477c478bd9Sstevel@tonic-gate 		wvp->skiplabelp = wvp->default_skiplabelp;
19487c478bd9Sstevel@tonic-gate 		wvp->skipxferp = wvp->default_skipxferp;
19497c478bd9Sstevel@tonic-gate 	} else {
19507c478bd9Sstevel@tonic-gate 		wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
19517c478bd9Sstevel@tonic-gate 		wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
19527c478bd9Sstevel@tonic-gate 		wvp->skipxferp = NULL;
19537c478bd9Sstevel@tonic-gate 		if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
19547c478bd9Sstevel@tonic-gate 			err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
19557c478bd9Sstevel@tonic-gate 			    NULL, &wvp->skipxferp);
19567c478bd9Sstevel@tonic-gate 			if (err == DDI_FAILURE) {
19577c478bd9Sstevel@tonic-gate 				wvp->skipxferp = NULL;
19587c478bd9Sstevel@tonic-gate 				wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
19597c478bd9Sstevel@tonic-gate 			}
19607c478bd9Sstevel@tonic-gate 		}
19617c478bd9Sstevel@tonic-gate 		wvp->ixl_setskipmode_cmdp->compiler_privatep =
19627c478bd9Sstevel@tonic-gate 		    (void *)wvp->skipxferp;
19637c478bd9Sstevel@tonic-gate 	}
19647c478bd9Sstevel@tonic-gate }
19657c478bd9Sstevel@tonic-gate 
19667c478bd9Sstevel@tonic-gate /*
19677c478bd9Sstevel@tonic-gate  * hci1394_set_xmit_storevalue_desc()
19687c478bd9Sstevel@tonic-gate  *    Set up store_value DMA descriptor.
19697c478bd9Sstevel@tonic-gate  *    XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as the first
19707c478bd9Sstevel@tonic-gate  *    descriptor in the descriptor block (to handle skip mode processing).
19717c478bd9Sstevel@tonic-gate  */
19727c478bd9Sstevel@tonic-gate static void
19737c478bd9Sstevel@tonic-gate hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
19747c478bd9Sstevel@tonic-gate {
19757c478bd9Sstevel@tonic-gate 	wvp->descriptors++;
19767c478bd9Sstevel@tonic-gate 
19777c478bd9Sstevel@tonic-gate 	HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
19787c478bd9Sstevel@tonic-gate 	    wvp->storevalue_data);
19797c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].data_addr =
19807c478bd9Sstevel@tonic-gate 	    wvp->storevalue_bufp;
19817c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
19827c478bd9Sstevel@tonic-gate 	wvp->descriptor_block[wvp->descriptors - 1].status = 0;
19837c478bd9Sstevel@tonic-gate }
19847c478bd9Sstevel@tonic-gate 
19857c478bd9Sstevel@tonic-gate /*
19867c478bd9Sstevel@tonic-gate  * hci1394_set_next_xfer_buf()
19877c478bd9Sstevel@tonic-gate  *    This routine adds the data buffer to the current wvp list.
19887c478bd9Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
19897c478bd9Sstevel@tonic-gate  *    contains the error code.
19907c478bd9Sstevel@tonic-gate  */
19917c478bd9Sstevel@tonic-gate static int
19927c478bd9Sstevel@tonic-gate hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
19937c478bd9Sstevel@tonic-gate     uint16_t size)
19947c478bd9Sstevel@tonic-gate {
19957c478bd9Sstevel@tonic-gate 	/* error if buffer pointer is null (size may be 0) */
1996ffc2b7d4SToomas Soome 	if (bufp == 0) {
19977c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
19987c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
19997c478bd9Sstevel@tonic-gate 	}
20007c478bd9Sstevel@tonic-gate 
20017c478bd9Sstevel@tonic-gate 	/* count new xfer buffer */
20027c478bd9Sstevel@tonic-gate 	wvp->xfer_bufcnt++;
20037c478bd9Sstevel@tonic-gate 
20047c478bd9Sstevel@tonic-gate 	/* error if exceeds maximum xfer buffer components allowed */
20057c478bd9Sstevel@tonic-gate 	if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
20067c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
20077c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
20087c478bd9Sstevel@tonic-gate 	}
20097c478bd9Sstevel@tonic-gate 
20107c478bd9Sstevel@tonic-gate 	/* save xmit buffer and size */
20117c478bd9Sstevel@tonic-gate 	wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
20127c478bd9Sstevel@tonic-gate 	wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
20137c478bd9Sstevel@tonic-gate 
20147c478bd9Sstevel@tonic-gate 	/* accumulate total packet length */
20157c478bd9Sstevel@tonic-gate 	wvp->xfer_pktlen += size;
20167c478bd9Sstevel@tonic-gate 
20177c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
20187c478bd9Sstevel@tonic-gate }
20197c478bd9Sstevel@tonic-gate 
20207c478bd9Sstevel@tonic-gate /*
20217c478bd9Sstevel@tonic-gate  * hci1394_flush_end_desc_check()
20227c478bd9Sstevel@tonic-gate  *    Check if a flush is required before the last descriptor block of a
20237c478bd9Sstevel@tonic-gate  *    non-unary set generated by an xfer buf or xmit special command, or of
20247c478bd9Sstevel@tonic-gate  *    a unary set, provided no other flush has already been done.
20257c478bd9Sstevel@tonic-gate  *
20267c478bd9Sstevel@tonic-gate  *    hci flush is required if xfer is finalized by an updateable
20277c478bd9Sstevel@tonic-gate  *    jump command.
20287c478bd9Sstevel@tonic-gate  *
20297c478bd9Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
20307c478bd9Sstevel@tonic-gate  *    will contain the error code.
20317c478bd9Sstevel@tonic-gate  */
20327c478bd9Sstevel@tonic-gate static int
20337c478bd9Sstevel@tonic-gate hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
20347c478bd9Sstevel@tonic-gate {
20357c478bd9Sstevel@tonic-gate 	if ((count != 0) ||
20367c478bd9Sstevel@tonic-gate 	    ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2037ffc2b7d4SToomas Soome 	    INITIATING_LBL)) == 0)) {
20387c478bd9Sstevel@tonic-gate 
20397c478bd9Sstevel@tonic-gate 		if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
20407c478bd9Sstevel@tonic-gate 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
20417c478bd9Sstevel@tonic-gate 				/* wvp->dma_bld_error is set by above call */
20427c478bd9Sstevel@tonic-gate 				return (DDI_FAILURE);
20437c478bd9Sstevel@tonic-gate 			}
20447c478bd9Sstevel@tonic-gate 		}
20457c478bd9Sstevel@tonic-gate 	}
20467c478bd9Sstevel@tonic-gate 
20477c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
20487c478bd9Sstevel@tonic-gate }
20497c478bd9Sstevel@tonic-gate 
20507c478bd9Sstevel@tonic-gate /*
20517c478bd9Sstevel@tonic-gate  * hci1394_flush_hci_cache()
20527c478bd9Sstevel@tonic-gate  *    Sun hci controller (RIO) implementation specific processing!
20537c478bd9Sstevel@tonic-gate  *
20547c478bd9Sstevel@tonic-gate  *    Allocate dma memory for 1 hci descriptor block which will be left unused.
20557c478bd9Sstevel@tonic-gate  *    During execution this will cause a break in the contiguous address space
20567c478bd9Sstevel@tonic-gate  *    processing required by Sun's RIO implementation of the ohci controller and
20577c478bd9Sstevel@tonic-gate  *    will require the controller to refetch the next descriptor block from
20587c478bd9Sstevel@tonic-gate  *    host memory.
20597c478bd9Sstevel@tonic-gate  *
20607c478bd9Sstevel@tonic-gate  *    General rules for cache flush preceding a descriptor block in dma memory:
20617c478bd9Sstevel@tonic-gate  *    1. Current IXL Xfer Command Updateable Rule:
20627c478bd9Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if it, or any of the
20637c478bd9Sstevel@tonic-gate  *	    non-start IXL packet xfer commands associated with it, is flagged
20647c478bd9Sstevel@tonic-gate  *	    updateable.
20657c478bd9Sstevel@tonic-gate  *    2. Next IXL Xfer Command Indeterminate Rule:
20667c478bd9Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if an IXL jump command
20677c478bd9Sstevel@tonic-gate  *	    which is flagged updateable has finalized the current IXL xfer
20687c478bd9Sstevel@tonic-gate  *	    command.
20697c478bd9Sstevel@tonic-gate  *    3. Updateable IXL Set Command Rule:
20707c478bd9Sstevel@tonic-gate  *	    Cache flush of an IXL xfer command is required if any of the IXL
20717c478bd9Sstevel@tonic-gate  *	    "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
20727c478bd9Sstevel@tonic-gate  *	    command (i.e. immediately preceding it) is flagged updateable.
20737c478bd9Sstevel@tonic-gate  *    4. Label Initiating Xfer Command Rule:
20747c478bd9Sstevel@tonic-gate  *	    Cache flush of IXL xfer command is required if it is initiated by a
20757c478bd9Sstevel@tonic-gate  *	    label IXL command.  (This is to allow both a flush of the cache and
20767c478bd9Sstevel@tonic-gate  *	    an interrupt to be generated easily and in close proximity to each
20777c478bd9Sstevel@tonic-gate  *	    other.  This can make possible a simpler, more successful reset of
20787c478bd9Sstevel@tonic-gate  *	    descriptor statuses, especially under circumstances where the cycle
20797c478bd9Sstevel@tonic-gate  *	    of hci commands is short and/or there are no callbacks distributed
20807c478bd9Sstevel@tonic-gate  *	    through the span of xfers, etc...  This is especially important for
20817c478bd9Sstevel@tonic-gate  *	    input where statuses must be reset before execution cycles back
20827c478bd9Sstevel@tonic-gate  *	    again.
20837c478bd9Sstevel@tonic-gate  *
20847c478bd9Sstevel@tonic-gate  *    Application of above rules:
20857c478bd9Sstevel@tonic-gate  *    Packet mode IXL xfer commands:
20867c478bd9Sstevel@tonic-gate  *	    If any of the above flush rules apply, flush cache should be done
20877c478bd9Sstevel@tonic-gate  *	    immediately preceding the generation of the dma descriptor block
20887c478bd9Sstevel@tonic-gate  *	    for the packet xfer.
20897c478bd9Sstevel@tonic-gate  *    Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
20907c478bd9Sstevel@tonic-gate  *    SEND_HDR_ONLY, and SEND_NO_PKT):
20917c478bd9Sstevel@tonic-gate  *	    If Rule #1, #3 or #4 applies, a flush cache should be done
20927c478bd9Sstevel@tonic-gate  *	    immediately before the first generated dma descriptor block of the
20937c478bd9Sstevel@tonic-gate  *	    non-packet xfer.
20947c478bd9Sstevel@tonic-gate  *	    If Rule #2 applies, a flush cache should be done immediately before
20957c478bd9Sstevel@tonic-gate  *	    the last generated dma descriptor block of the non-packet xfer.
20967c478bd9Sstevel@tonic-gate  *
20977c478bd9Sstevel@tonic-gate  *    Note: The flush cache should be done at most once in each location that is
20987c478bd9Sstevel@tonic-gate  *    required to be flushed no matter how many rules apply (i.e. only once
20997c478bd9Sstevel@tonic-gate  *    before the first descriptor block and/or only once before the last
21007c478bd9Sstevel@tonic-gate  *    descriptor block generated).  If more than one place requires a flush,
21017c478bd9Sstevel@tonic-gate  *    then both flush operations must be performed.  This is determined by
21027c478bd9Sstevel@tonic-gate  *    taking all rules that apply into account.
21037c478bd9Sstevel@tonic-gate  *
21047c478bd9Sstevel@tonic-gate  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
21057c478bd9Sstevel@tonic-gate  *    will contain the error code.
21067c478bd9Sstevel@tonic-gate  */
21077c478bd9Sstevel@tonic-gate static int
21087c478bd9Sstevel@tonic-gate hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
21097c478bd9Sstevel@tonic-gate {
21107c478bd9Sstevel@tonic-gate 	uint32_t	dma_bound;
21117c478bd9Sstevel@tonic-gate 
21127c478bd9Sstevel@tonic-gate 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
21137c478bd9Sstevel@tonic-gate 	    NULL) {
21147c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
21157c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
21167c478bd9Sstevel@tonic-gate 	}
21177c478bd9Sstevel@tonic-gate 
21187c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
21197c478bd9Sstevel@tonic-gate }
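
/*
 * Minimal sketch (the helper name is hypothetical) of how the rules above
 * reduce to the two flush decisions made for a non-packet mode xfer.  The
 * flag names are the same xfer_hci_flush bits tested elsewhere in this file.
 */
static void
hci1394_example_flush_decision(uint32_t xfer_hci_flush, int *flush_firstp,
    int *flush_lastp)
{
	/* rules #1, #3 and #4: updateable xfer/set cmds or label-initiated */
	*flush_firstp = ((xfer_hci_flush &
	    (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) != 0);

	/* rule #2: current xfer finalized by an updateable jump */
	*flush_lastp = ((xfer_hci_flush & UPDATEABLE_JUMP) != 0);
}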
21207c478bd9Sstevel@tonic-gate 
21217c478bd9Sstevel@tonic-gate /*
21227c478bd9Sstevel@tonic-gate  * hci1394_alloc_storevalue_dma_mem()
21237c478bd9Sstevel@tonic-gate  *    Allocate dma memory for a one-component hci descriptor block
21247c478bd9Sstevel@tonic-gate  *    which will be used as the dma memory location that ixl
21257c478bd9Sstevel@tonic-gate  *    compiler generated storevalue descriptor commands will
21267c478bd9Sstevel@tonic-gate  *    specify as the location to store their data value.
21277c478bd9Sstevel@tonic-gate  *
21287c478bd9Sstevel@tonic-gate  *    Returns 32-bit bound address of allocated mem, or 0 on failure.
21297c478bd9Sstevel@tonic-gate  */
21307c478bd9Sstevel@tonic-gate static uint32_t
21317c478bd9Sstevel@tonic-gate hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
21327c478bd9Sstevel@tonic-gate {
21337c478bd9Sstevel@tonic-gate 	uint32_t	dma_bound;
21347c478bd9Sstevel@tonic-gate 
21357c478bd9Sstevel@tonic-gate 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
21367c478bd9Sstevel@tonic-gate 	    &dma_bound) == NULL) {
21377c478bd9Sstevel@tonic-gate 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2138ffc2b7d4SToomas Soome 		return (0);
21397c478bd9Sstevel@tonic-gate 	}
21407c478bd9Sstevel@tonic-gate 
21417c478bd9Sstevel@tonic-gate 	/* return bound address of allocated memory */
21427c478bd9Sstevel@tonic-gate 	return (dma_bound);
21437c478bd9Sstevel@tonic-gate }
21447c478bd9Sstevel@tonic-gate 
21457c478bd9Sstevel@tonic-gate 
21467c478bd9Sstevel@tonic-gate /*
21477c478bd9Sstevel@tonic-gate  * hci1394_alloc_xfer_ctl()
21487c478bd9Sstevel@tonic-gate  *    Allocate an xfer_ctl structure.
21497c478bd9Sstevel@tonic-gate  */
21507c478bd9Sstevel@tonic-gate static hci1394_xfer_ctl_t *
21517c478bd9Sstevel@tonic-gate hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
21527c478bd9Sstevel@tonic-gate {
21537c478bd9Sstevel@tonic-gate 	hci1394_xfer_ctl_t *xcsp;
21547c478bd9Sstevel@tonic-gate 
21557c478bd9Sstevel@tonic-gate 	/*
21567c478bd9Sstevel@tonic-gate 	 * allocate an xfer_ctl struct which includes dmacnt of
21577c478bd9Sstevel@tonic-gate 	 * xfer_ctl_dma structs
21587c478bd9Sstevel@tonic-gate 	 */
21597c478bd9Sstevel@tonic-gate #ifdef _KERNEL
21607c478bd9Sstevel@tonic-gate 	if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
21617c478bd9Sstevel@tonic-gate 	    (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
21627c478bd9Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
21637c478bd9Sstevel@tonic-gate 
21647c478bd9Sstevel@tonic-gate 		return (NULL);
21657c478bd9Sstevel@tonic-gate 	}
21667c478bd9Sstevel@tonic-gate #else
21677c478bd9Sstevel@tonic-gate 	/*
21687c478bd9Sstevel@tonic-gate 	 * This section makes it possible to easily run and test the compiler in
21697c478bd9Sstevel@tonic-gate 	 * user mode.
21707c478bd9Sstevel@tonic-gate 	 */
21717c478bd9Sstevel@tonic-gate 	if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
21727c478bd9Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
21737c478bd9Sstevel@tonic-gate 	    sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
21747c478bd9Sstevel@tonic-gate 		return (NULL);
21757c478bd9Sstevel@tonic-gate 	}
21767c478bd9Sstevel@tonic-gate #endif
21777c478bd9Sstevel@tonic-gate 	/*
21787c478bd9Sstevel@tonic-gate 	 * set dma structure count into allocated xfer_ctl struct for
21797c478bd9Sstevel@tonic-gate 	 * later deletion.
21807c478bd9Sstevel@tonic-gate 	 */
21817c478bd9Sstevel@tonic-gate 	xcsp->cnt = dmacnt;
21827c478bd9Sstevel@tonic-gate 
21837c478bd9Sstevel@tonic-gate 	/* link it to previously allocated xfer_ctl structs or set as first */
21847c478bd9Sstevel@tonic-gate 	if (wvp->xcs_firstp == NULL) {
21857c478bd9Sstevel@tonic-gate 		wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
21867c478bd9Sstevel@tonic-gate 	} else {
21877c478bd9Sstevel@tonic-gate 		wvp->xcs_currentp->ctl_nextp = xcsp;
21887c478bd9Sstevel@tonic-gate 		wvp->xcs_currentp = xcsp;
21897c478bd9Sstevel@tonic-gate 	}
21907c478bd9Sstevel@tonic-gate 
21917c478bd9Sstevel@tonic-gate 	/* return allocated xfer_ctl structure */
21927c478bd9Sstevel@tonic-gate 	return (xcsp);
21937c478bd9Sstevel@tonic-gate }
21947c478bd9Sstevel@tonic-gate 
21957c478bd9Sstevel@tonic-gate /*
21967c478bd9Sstevel@tonic-gate  * hci1394_alloc_dma_mem()
21977c478bd9Sstevel@tonic-gate  *	Allocates and binds memory for openHCI DMA descriptors as needed.
21987c478bd9Sstevel@tonic-gate  */
21997c478bd9Sstevel@tonic-gate static void *
22007c478bd9Sstevel@tonic-gate hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
22017c478bd9Sstevel@tonic-gate     uint32_t *dma_bound)
22027c478bd9Sstevel@tonic-gate {
22037c478bd9Sstevel@tonic-gate 	hci1394_idma_desc_mem_t *dma_new;
22047c478bd9Sstevel@tonic-gate 	hci1394_buf_parms_t parms;
22057c478bd9Sstevel@tonic-gate 	hci1394_buf_info_t *memp;
22067c478bd9Sstevel@tonic-gate 	void	*dma_mem_ret;
22077c478bd9Sstevel@tonic-gate 	int	ret;
22087c478bd9Sstevel@tonic-gate 
22097c478bd9Sstevel@tonic-gate 	/*
22107c478bd9Sstevel@tonic-gate 	 * if no dma has been allocated or current request exceeds
22117c478bd9Sstevel@tonic-gate 	 * remaining memory
22127c478bd9Sstevel@tonic-gate 	 */
22137c478bd9Sstevel@tonic-gate 	if ((wvp->dma_currentp == NULL) ||
22147c478bd9Sstevel@tonic-gate 	    (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
2215ffc2b7d4SToomas Soome 	    wvp->dma_currentp->used))) {
22167c478bd9Sstevel@tonic-gate #ifdef _KERNEL
22177c478bd9Sstevel@tonic-gate 		/* kernel-mode memory allocation for driver */
22187c478bd9Sstevel@tonic-gate 
22197c478bd9Sstevel@tonic-gate 		/* allocate struct to track more dma descriptor memory */
22207c478bd9Sstevel@tonic-gate 		if ((dma_new = (hci1394_idma_desc_mem_t *)
22217c478bd9Sstevel@tonic-gate 		    kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
22227c478bd9Sstevel@tonic-gate 		    KM_NOSLEEP)) == NULL) {
22237c478bd9Sstevel@tonic-gate 			return (NULL);
22247c478bd9Sstevel@tonic-gate 		}
22257c478bd9Sstevel@tonic-gate 
22267c478bd9Sstevel@tonic-gate 		/*
22277c478bd9Sstevel@tonic-gate 		 * if more cookies available from the current mem, try to find
22287c478bd9Sstevel@tonic-gate 		 * one of suitable size. Cookies that are too small will be
22297c478bd9Sstevel@tonic-gate 		 * skipped and unused. Given that cookie size is always at least
22307c478bd9Sstevel@tonic-gate 		 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
22317c478bd9Sstevel@tonic-gate 		 * it's a small price to pay for code simplicity.
22327c478bd9Sstevel@tonic-gate 		 */
22337c478bd9Sstevel@tonic-gate 		if (wvp->dma_currentp != NULL) {
22347c478bd9Sstevel@tonic-gate 			/* new struct is derived from current */
22357c478bd9Sstevel@tonic-gate 			memp = &wvp->dma_currentp->mem;
22367c478bd9Sstevel@tonic-gate 			dma_new->mem = *memp;
22377c478bd9Sstevel@tonic-gate 			dma_new->offset = wvp->dma_currentp->offset +
22387c478bd9Sstevel@tonic-gate 			    memp->bi_cookie.dmac_size;
22397c478bd9Sstevel@tonic-gate 
22407c478bd9Sstevel@tonic-gate 			for (; memp->bi_cookie_count > 1;
22417c478bd9Sstevel@tonic-gate 			    memp->bi_cookie_count--) {
22427c478bd9Sstevel@tonic-gate 				ddi_dma_nextcookie(memp->bi_dma_handle,
22437c478bd9Sstevel@tonic-gate 				    &dma_new->mem.bi_cookie);
22447c478bd9Sstevel@tonic-gate 
22457c478bd9Sstevel@tonic-gate 				if (dma_new->mem.bi_cookie.dmac_size >= size) {
22467c478bd9Sstevel@tonic-gate 					dma_new->mem_handle =
22477c478bd9Sstevel@tonic-gate 					    wvp->dma_currentp->mem_handle;
22487c478bd9Sstevel@tonic-gate 					wvp->dma_currentp->mem_handle = NULL;
22497c478bd9Sstevel@tonic-gate 					dma_new->mem.bi_cookie_count--;
22507c478bd9Sstevel@tonic-gate 					break;
22517c478bd9Sstevel@tonic-gate 				}
22527c478bd9Sstevel@tonic-gate 				dma_new->offset +=
22537c478bd9Sstevel@tonic-gate 				    dma_new->mem.bi_cookie.dmac_size;
22547c478bd9Sstevel@tonic-gate 			}
22557c478bd9Sstevel@tonic-gate 		}
22567c478bd9Sstevel@tonic-gate 
22577c478bd9Sstevel@tonic-gate 		/* if no luck with current buffer, allocate a new one */
22587c478bd9Sstevel@tonic-gate 		if (dma_new->mem_handle == NULL) {
22597c478bd9Sstevel@tonic-gate 			parms.bp_length = HCI1394_IXL_PAGESIZE;
22607c478bd9Sstevel@tonic-gate 			parms.bp_max_cookies = OHCI_MAX_COOKIE;
22617c478bd9Sstevel@tonic-gate 			parms.bp_alignment = 16;
22627c478bd9Sstevel@tonic-gate 			ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
22637c478bd9Sstevel@tonic-gate 			    &parms, &dma_new->mem, &dma_new->mem_handle);
22647c478bd9Sstevel@tonic-gate 			if (ret != DDI_SUCCESS) {
22657c478bd9Sstevel@tonic-gate 				kmem_free(dma_new,
22667c478bd9Sstevel@tonic-gate 				    sizeof (hci1394_idma_desc_mem_t));
22677c478bd9Sstevel@tonic-gate 
22687c478bd9Sstevel@tonic-gate 				return (NULL);
22697c478bd9Sstevel@tonic-gate 			}
22707c478bd9Sstevel@tonic-gate 
22717c478bd9Sstevel@tonic-gate 			/* paranoia: this is not supposed to happen */
22727c478bd9Sstevel@tonic-gate 			if (dma_new->mem.bi_cookie.dmac_size < size) {
22737c478bd9Sstevel@tonic-gate 				hci1394_buf_free(&dma_new->mem_handle);
22747c478bd9Sstevel@tonic-gate 				kmem_free(dma_new,
22757c478bd9Sstevel@tonic-gate 				    sizeof (hci1394_idma_desc_mem_t));
22767c478bd9Sstevel@tonic-gate 
22777c478bd9Sstevel@tonic-gate 				return (NULL);
22787c478bd9Sstevel@tonic-gate 			}
22797c478bd9Sstevel@tonic-gate 			dma_new->offset = 0;
22807c478bd9Sstevel@tonic-gate 		}
22817c478bd9Sstevel@tonic-gate #else
22827c478bd9Sstevel@tonic-gate 		/* user-mode memory allocation for user mode compiler tests */
22837c478bd9Sstevel@tonic-gate 		/* allocate another dma_desc_mem struct */
22847c478bd9Sstevel@tonic-gate 		if ((dma_new = (hci1394_idma_desc_mem_t *)
2285ffc2b7d4SToomas Soome 		    calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
22867c478bd9Sstevel@tonic-gate 			return (NULL);
22877c478bd9Sstevel@tonic-gate 		}
22887c478bd9Sstevel@tonic-gate 		dma_new->mem.bi_dma_handle = NULL;
22897c478bd9Sstevel@tonic-gate 		dma_new->mem.bi_handle = NULL;
22907c478bd9Sstevel@tonic-gate 		if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
2291ffc2b7d4SToomas Soome 		    HCI1394_IXL_PAGESIZE)) == NULL) {
22927c478bd9Sstevel@tonic-gate 			return (NULL);
22937c478bd9Sstevel@tonic-gate 		}
22947c478bd9Sstevel@tonic-gate 		dma_new->mem.bi_cookie.dmac_address =
22957c478bd9Sstevel@tonic-gate 		    (unsigned long)dma_new->mem.bi_kaddr;
22967c478bd9Sstevel@tonic-gate 		dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
22977c478bd9Sstevel@tonic-gate 		dma_new->mem.bi_cookie_count = 1;
22987c478bd9Sstevel@tonic-gate #endif
22997c478bd9Sstevel@tonic-gate 
23007c478bd9Sstevel@tonic-gate 		/* if this is not first dma_desc_mem, link last one to it */
23017c478bd9Sstevel@tonic-gate 		if (wvp->dma_currentp != NULL) {
23027c478bd9Sstevel@tonic-gate 			wvp->dma_currentp->dma_nextp = dma_new;
23037c478bd9Sstevel@tonic-gate 			wvp->dma_currentp = dma_new;
23047c478bd9Sstevel@tonic-gate 		} else {
23057c478bd9Sstevel@tonic-gate 			/* else set it as first one */
23067c478bd9Sstevel@tonic-gate 			wvp->dma_currentp = wvp->dma_firstp = dma_new;
23077c478bd9Sstevel@tonic-gate 		}
23087c478bd9Sstevel@tonic-gate 	}
23097c478bd9Sstevel@tonic-gate 
23107c478bd9Sstevel@tonic-gate 	/* now allocate requested memory from current block */
23117c478bd9Sstevel@tonic-gate 	dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
23127c478bd9Sstevel@tonic-gate 	    wvp->dma_currentp->offset + wvp->dma_currentp->used;
23137c478bd9Sstevel@tonic-gate 	*dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
23147c478bd9Sstevel@tonic-gate 	    wvp->dma_currentp->used;
23157c478bd9Sstevel@tonic-gate 	wvp->dma_currentp->used += size;
23167c478bd9Sstevel@tonic-gate 
23177c478bd9Sstevel@tonic-gate 	return (dma_mem_ret);
23187c478bd9Sstevel@tonic-gate }
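
/*
 * Minimal sketch (hypothetical helper) of the carve-out done at the end of
 * hci1394_alloc_dma_mem(): the kernel virtual address advances by the
 * cookie's offset within the buffer plus the bytes already used, while the
 * bound (bus) address starts from the cookie's own dma address and so only
 * advances by the bytes already used.
 */
static void
hci1394_example_carve(caddr_t buf_kaddr, uint32_t cookie_offset,
    uint32_t cookie_busaddr, uint32_t *usedp, uint32_t size,
    caddr_t *kvirtp, uint32_t *boundp)
{
	*kvirtp = buf_kaddr + cookie_offset + *usedp;
	*boundp = cookie_busaddr + *usedp;
	*usedp += size;		/* consume 'size' bytes of current cookie */
}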
23197c478bd9Sstevel@tonic-gate 
23207c478bd9Sstevel@tonic-gate 
23217c478bd9Sstevel@tonic-gate /*
23227c478bd9Sstevel@tonic-gate  * hci1394_is_opcode_valid()
23237c478bd9Sstevel@tonic-gate  *    given an ixl opcode, this routine returns B_TRUE if it is a
23247c478bd9Sstevel@tonic-gate  *    recognized opcode and B_FALSE if it is not recognized.
23257c478bd9Sstevel@tonic-gate  *    Note that the FULL 16 bits of the opcode are checked which includes
23267c478bd9Sstevel@tonic-gate  *    various flags and not just the low order 8 bits of unique code.
23277c478bd9Sstevel@tonic-gate  */
23287c478bd9Sstevel@tonic-gate static boolean_t
23297c478bd9Sstevel@tonic-gate hci1394_is_opcode_valid(uint16_t ixlopcode)
23307c478bd9Sstevel@tonic-gate {
23317c478bd9Sstevel@tonic-gate 	/* if it's not one we know about, then it's bad */
23327c478bd9Sstevel@tonic-gate 	switch (ixlopcode) {
23337c478bd9Sstevel@tonic-gate 	case IXL1394_OP_LABEL:
23347c478bd9Sstevel@tonic-gate 	case IXL1394_OP_JUMP:
23357c478bd9Sstevel@tonic-gate 	case IXL1394_OP_CALLBACK:
23367c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT:
23377c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_ST:
23387c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_BUF:
23397c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT:
23407c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_ST:
23417c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_WHDR_ST:
23427c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_BUF:
23437c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_HDR_ONLY:
23447c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_NO_PKT:
23457c478bd9Sstevel@tonic-gate 	case IXL1394_OP_STORE_TIMESTAMP:
23467c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SET_TAGSYNC:
23477c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SET_SKIPMODE:
23487c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SET_SYNCWAIT:
23497c478bd9Sstevel@tonic-gate 	case IXL1394_OP_JUMP_U:
23507c478bd9Sstevel@tonic-gate 	case IXL1394_OP_CALLBACK_U:
23517c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_U:
23527c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_PKT_ST_U:
23537c478bd9Sstevel@tonic-gate 	case IXL1394_OP_RECV_BUF_U:
23547c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_U:
23557c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_ST_U:
23567c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
23577c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SEND_BUF_U:
23587c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SET_TAGSYNC_U:
23597c478bd9Sstevel@tonic-gate 	case IXL1394_OP_SET_SKIPMODE_U:
23607c478bd9Sstevel@tonic-gate 		return (B_TRUE);
23617c478bd9Sstevel@tonic-gate 	default:
23627c478bd9Sstevel@tonic-gate 		return (B_FALSE);
23637c478bd9Sstevel@tonic-gate 	}
23647c478bd9Sstevel@tonic-gate }
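
/*
 * Usage note: because the full 16-bit opcode is matched, the updateable
 * variants (e.g. IXL1394_OP_JUMP_U vs. IXL1394_OP_JUMP) are accepted as
 * distinct values, while an otherwise valid opcode carrying an unexpected
 * flag bit falls through to B_FALSE.
 */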