1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright (c) 1999-2000 by Sun Microsystems, Inc.
24*7c478bd9Sstevel@tonic-gate  * All rights reserved.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate /*
30*7c478bd9Sstevel@tonic-gate  * hci1394_q.c
31*7c478bd9Sstevel@tonic-gate  *    This code decouples some of the OpenHCI async descriptor logic/structures
32*7c478bd9Sstevel@tonic-gate  *    from the async processing.  The goal was to combine as much of the
33*7c478bd9Sstevel@tonic-gate  *    duplicate code as possible for the different types of async transfer
34*7c478bd9Sstevel@tonic-gate  *    without going too overboard.
35*7c478bd9Sstevel@tonic-gate  *
36*7c478bd9Sstevel@tonic-gate  *    There are two parts to the Q, the descriptor buffer and the data buffer.
37*7c478bd9Sstevel@tonic-gate  *    For the most part, data to be transmitted and data which is received go
38*7c478bd9Sstevel@tonic-gate  *    in the data buffers.  Information about where to get the data and where
39*7c478bd9Sstevel@tonic-gate  *    to put it resides in the descriptor buffers.  There are exceptions to this.
40*7c478bd9Sstevel@tonic-gate  */
41*7c478bd9Sstevel@tonic-gate 
42*7c478bd9Sstevel@tonic-gate 
43*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
44*7c478bd9Sstevel@tonic-gate #include <sys/conf.h>
45*7c478bd9Sstevel@tonic-gate #include <sys/ddi.h>
46*7c478bd9Sstevel@tonic-gate #include <sys/modctl.h>
47*7c478bd9Sstevel@tonic-gate #include <sys/stat.h>
48*7c478bd9Sstevel@tonic-gate #include <sys/sunddi.h>
49*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
50*7c478bd9Sstevel@tonic-gate #include <sys/kmem.h>
51*7c478bd9Sstevel@tonic-gate #include <sys/note.h>
52*7c478bd9Sstevel@tonic-gate 
53*7c478bd9Sstevel@tonic-gate #include <sys/1394/adapters/hci1394.h>
54*7c478bd9Sstevel@tonic-gate 
55*7c478bd9Sstevel@tonic-gate 
56*7c478bd9Sstevel@tonic-gate static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
57*7c478bd9Sstevel@tonic-gate     uint32_t *io_addr);
58*7c478bd9Sstevel@tonic-gate static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
59*7c478bd9Sstevel@tonic-gate static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
60*7c478bd9Sstevel@tonic-gate static void hci1394_q_reset(hci1394_q_handle_t q_handle);
61*7c478bd9Sstevel@tonic-gate static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);
62*7c478bd9Sstevel@tonic-gate 
63*7c478bd9Sstevel@tonic-gate static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
64*7c478bd9Sstevel@tonic-gate     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
65*7c478bd9Sstevel@tonic-gate     uint_t hdrsize);
66*7c478bd9Sstevel@tonic-gate static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
67*7c478bd9Sstevel@tonic-gate     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
68*7c478bd9Sstevel@tonic-gate     uint_t hdrsize);
69*7c478bd9Sstevel@tonic-gate static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
70*7c478bd9Sstevel@tonic-gate     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
71*7c478bd9Sstevel@tonic-gate     uint_t datasize);
72*7c478bd9Sstevel@tonic-gate static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
73*7c478bd9Sstevel@tonic-gate     uint8_t *data, uint_t datasize);
74*7c478bd9Sstevel@tonic-gate static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
75*7c478bd9Sstevel@tonic-gate     hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);
76*7c478bd9Sstevel@tonic-gate 
77*7c478bd9Sstevel@tonic-gate static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
78*7c478bd9Sstevel@tonic-gate     hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);
79*7c478bd9Sstevel@tonic-gate 
80*7c478bd9Sstevel@tonic-gate _NOTE(SCHEME_PROTECTS_DATA("unique", msgb))
81*7c478bd9Sstevel@tonic-gate 
82*7c478bd9Sstevel@tonic-gate /*
83*7c478bd9Sstevel@tonic-gate  * hci1394_q_init()
84*7c478bd9Sstevel@tonic-gate  *    Initialize a Q.  A Q consists of a descriptor buffer and a data buffer and
85*7c478bd9Sstevel@tonic-gate  *    can be either an AT or AR Q. hci1394_q_init() returns a handle which
86*7c478bd9Sstevel@tonic-gate  * should be used for the rest of the hci1394_q_* calls.
87*7c478bd9Sstevel@tonic-gate  */
88*7c478bd9Sstevel@tonic-gate int
89*7c478bd9Sstevel@tonic-gate hci1394_q_init(hci1394_drvinfo_t *drvinfo,
90*7c478bd9Sstevel@tonic-gate     hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
91*7c478bd9Sstevel@tonic-gate     hci1394_q_handle_t *q_handle)
92*7c478bd9Sstevel@tonic-gate {
93*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *desc;
94*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *data;
95*7c478bd9Sstevel@tonic-gate 	hci1394_buf_parms_t parms;
96*7c478bd9Sstevel@tonic-gate 	hci1394_q_t *q;
97*7c478bd9Sstevel@tonic-gate 	int status;
98*7c478bd9Sstevel@tonic-gate 	int index;
99*7c478bd9Sstevel@tonic-gate 
100*7c478bd9Sstevel@tonic-gate 
101*7c478bd9Sstevel@tonic-gate 	ASSERT(drvinfo != NULL);
102*7c478bd9Sstevel@tonic-gate 	ASSERT(qinfo != NULL);
103*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
104*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_init_enter, HCI1394_TNF_HAL_STACK, "");
105*7c478bd9Sstevel@tonic-gate 
106*7c478bd9Sstevel@tonic-gate 	/*
107*7c478bd9Sstevel@tonic-gate 	 * allocate the memory to track this Q.  Initialize the internal Q
108*7c478bd9Sstevel@tonic-gate 	 * structure.
109*7c478bd9Sstevel@tonic-gate 	 */
110*7c478bd9Sstevel@tonic-gate 	q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
111*7c478bd9Sstevel@tonic-gate 	q->q_drvinfo = drvinfo;
112*7c478bd9Sstevel@tonic-gate 	q->q_info = *qinfo;
113*7c478bd9Sstevel@tonic-gate 	q->q_ohci = ohci_handle;
114*7c478bd9Sstevel@tonic-gate 	mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
115*7c478bd9Sstevel@tonic-gate 	desc = &q->q_desc;
116*7c478bd9Sstevel@tonic-gate 	data = &q->q_data;
117*7c478bd9Sstevel@tonic-gate 
118*7c478bd9Sstevel@tonic-gate 	/*
119*7c478bd9Sstevel@tonic-gate 	 * Allocate the Descriptor buffer.
120*7c478bd9Sstevel@tonic-gate 	 *
121*7c478bd9Sstevel@tonic-gate 	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
122*7c478bd9Sstevel@tonic-gate 	 * after we have tested the multiple cookie code on x86.
123*7c478bd9Sstevel@tonic-gate 	 */
124*7c478bd9Sstevel@tonic-gate 	parms.bp_length = qinfo->qi_desc_size;
125*7c478bd9Sstevel@tonic-gate 	parms.bp_max_cookies = 1;
126*7c478bd9Sstevel@tonic-gate 	parms.bp_alignment = 16;
127*7c478bd9Sstevel@tonic-gate 	status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
128*7c478bd9Sstevel@tonic-gate 	    &desc->qb_buf_handle);
129*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
130*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&q->q_mutex);
131*7c478bd9Sstevel@tonic-gate 		kmem_free(q, sizeof (hci1394_q_t));
132*7c478bd9Sstevel@tonic-gate 		*q_handle = NULL;
133*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_init_bae_fail, HCI1394_TNF_HAL_ERROR, "");
134*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
135*7c478bd9Sstevel@tonic-gate 		    "");
136*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
137*7c478bd9Sstevel@tonic-gate 	}
138*7c478bd9Sstevel@tonic-gate 
139*7c478bd9Sstevel@tonic-gate 	/* Copy the buffer cookies into our local cookie array */
140*7c478bd9Sstevel@tonic-gate 	desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
141*7c478bd9Sstevel@tonic-gate 	for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
142*7c478bd9Sstevel@tonic-gate 		ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
143*7c478bd9Sstevel@tonic-gate 		    &desc->qb_buf.bi_cookie);
144*7c478bd9Sstevel@tonic-gate 		desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
145*7c478bd9Sstevel@tonic-gate 	}
146*7c478bd9Sstevel@tonic-gate 
147*7c478bd9Sstevel@tonic-gate 	/*
148*7c478bd9Sstevel@tonic-gate 	 * Allocate the Data buffer.
149*7c478bd9Sstevel@tonic-gate 	 *
150*7c478bd9Sstevel@tonic-gate 	 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
151*7c478bd9Sstevel@tonic-gate 	 * after we have tested the multiple cookie code on x86.
152*7c478bd9Sstevel@tonic-gate 	 */
153*7c478bd9Sstevel@tonic-gate 	parms.bp_length = qinfo->qi_data_size;
154*7c478bd9Sstevel@tonic-gate 	parms.bp_max_cookies = 1;
155*7c478bd9Sstevel@tonic-gate 	parms.bp_alignment = 16;
156*7c478bd9Sstevel@tonic-gate 	status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
157*7c478bd9Sstevel@tonic-gate 	    &data->qb_buf_handle);
158*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
159*7c478bd9Sstevel@tonic-gate 		hci1394_buf_free(&desc->qb_buf_handle);
160*7c478bd9Sstevel@tonic-gate 		mutex_destroy(&q->q_mutex);
161*7c478bd9Sstevel@tonic-gate 		kmem_free(q, sizeof (hci1394_q_t));
162*7c478bd9Sstevel@tonic-gate 		*q_handle = NULL;
163*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_init_baa_fail, HCI1394_TNF_HAL_ERROR, "");
164*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
165*7c478bd9Sstevel@tonic-gate 		    "");
166*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
167*7c478bd9Sstevel@tonic-gate 	}
168*7c478bd9Sstevel@tonic-gate 
169*7c478bd9Sstevel@tonic-gate 	/*
170*7c478bd9Sstevel@tonic-gate 	 * We must have at least 2 ARQ data buffers.  If we only have one, we
171*7c478bd9Sstevel@tonic-gate 	 * will artificially create 2. We must have 2 so that we always have a
172*7c478bd9Sstevel@tonic-gate 	 * descriptor with free data space to write AR data to. When one is
173*7c478bd9Sstevel@tonic-gate 	 * empty, it will take us a bit to get a new descriptor back into the
174*7c478bd9Sstevel@tonic-gate 	 * chain.
175*7c478bd9Sstevel@tonic-gate 	 */
176*7c478bd9Sstevel@tonic-gate 	if ((qinfo->qi_mode == HCI1394_ARQ) &&
177*7c478bd9Sstevel@tonic-gate 	    (data->qb_buf.bi_cookie_count == 1)) {
178*7c478bd9Sstevel@tonic-gate 		data->qb_buf.bi_cookie_count = 2;
179*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[0] = data->qb_buf.bi_cookie;
180*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[0].dmac_size /= 2;
181*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[1] = data->qb_cookie[0];
182*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[1].dmac_laddress =
183*7c478bd9Sstevel@tonic-gate 		    data->qb_cookie[0].dmac_laddress +
184*7c478bd9Sstevel@tonic-gate 		    data->qb_cookie[0].dmac_size;
185*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[1].dmac_address =
186*7c478bd9Sstevel@tonic-gate 		    data->qb_cookie[0].dmac_address +
187*7c478bd9Sstevel@tonic-gate 		    data->qb_cookie[0].dmac_size;
188*7c478bd9Sstevel@tonic-gate 
189*7c478bd9Sstevel@tonic-gate 	/* We have more than 1 cookie or we are an AT Q */
190*7c478bd9Sstevel@tonic-gate 	} else {
191*7c478bd9Sstevel@tonic-gate 		/* Copy the buffer cookies into our local cookie array */
192*7c478bd9Sstevel@tonic-gate 		data->qb_cookie[0] = data->qb_buf.bi_cookie;
193*7c478bd9Sstevel@tonic-gate 		for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
194*7c478bd9Sstevel@tonic-gate 			ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
195*7c478bd9Sstevel@tonic-gate 			    &data->qb_buf.bi_cookie);
196*7c478bd9Sstevel@tonic-gate 			data->qb_cookie[index] = data->qb_buf.bi_cookie;
197*7c478bd9Sstevel@tonic-gate 		}
198*7c478bd9Sstevel@tonic-gate 	}
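
	/*
	 * Worked example (hypothetical size): if the ARQ data buffer came
	 * back as a single 16KB cookie, the code above treats it as two 8KB
	 * pieces.  qb_cookie[0] covers bytes 0 - 8191 and qb_cookie[1]
	 * covers bytes 8192 - 16383, with its dmac_address/dmac_laddress
	 * bumped up by 8KB.
	 */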
199*7c478bd9Sstevel@tonic-gate 
200*7c478bd9Sstevel@tonic-gate 	/* The top and bottom of the Q are only set once */
201*7c478bd9Sstevel@tonic-gate 	desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
202*7c478bd9Sstevel@tonic-gate 	desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
203*7c478bd9Sstevel@tonic-gate 	    desc->qb_buf.bi_real_length - 1;
204*7c478bd9Sstevel@tonic-gate 	data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
205*7c478bd9Sstevel@tonic-gate 	data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
206*7c478bd9Sstevel@tonic-gate 	    data->qb_buf.bi_real_length - 1;
207*7c478bd9Sstevel@tonic-gate 
208*7c478bd9Sstevel@tonic-gate 	/*
209*7c478bd9Sstevel@tonic-gate 	 * reset the Q pointers to their original settings.  Set up IM
210*7c478bd9Sstevel@tonic-gate 	 * descriptors if this is an AR Q.
211*7c478bd9Sstevel@tonic-gate 	 */
212*7c478bd9Sstevel@tonic-gate 	hci1394_q_reset(q);
213*7c478bd9Sstevel@tonic-gate 
214*7c478bd9Sstevel@tonic-gate 	/* if this is an AT Q, create a queued list for the AT descriptors */
215*7c478bd9Sstevel@tonic-gate 	if (qinfo->qi_mode == HCI1394_ATQ) {
216*7c478bd9Sstevel@tonic-gate 		hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
217*7c478bd9Sstevel@tonic-gate 	}
218*7c478bd9Sstevel@tonic-gate 
219*7c478bd9Sstevel@tonic-gate 	*q_handle = q;
220*7c478bd9Sstevel@tonic-gate 
221*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK, "");
222*7c478bd9Sstevel@tonic-gate 
223*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
224*7c478bd9Sstevel@tonic-gate }
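
/*
 * Illustrative sketch (not part of the driver): how a caller might bring up
 * and tear down an AT Q.  The sizes below are hypothetical and any other
 * qi_* fields are omitted here.
 *
 *	hci1394_q_info_t qinfo;
 *	hci1394_q_handle_t q;
 *
 *	qinfo.qi_mode = HCI1394_ATQ;
 *	qinfo.qi_desc_size = 4096;	(descriptor buffer size in bytes)
 *	qinfo.qi_data_size = 16384;	(data buffer size in bytes)
 *	if (hci1394_q_init(drvinfo, ohci_handle, &qinfo, &q) != DDI_SUCCESS) {
 *		(fail the attach/resume path)
 *	}
 *	...
 *	hci1394_q_fini(&q);	(q is set to NULL on return)
 */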
225*7c478bd9Sstevel@tonic-gate 
226*7c478bd9Sstevel@tonic-gate 
227*7c478bd9Sstevel@tonic-gate /*
228*7c478bd9Sstevel@tonic-gate  * hci1394_q_fini()
229*7c478bd9Sstevel@tonic-gate  *    Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
230*7c478bd9Sstevel@tonic-gate  *    handle is used for the parameter.  fini() will set your handle to NULL
231*7c478bd9Sstevel@tonic-gate  *    before returning.
232*7c478bd9Sstevel@tonic-gate  */
233*7c478bd9Sstevel@tonic-gate void
234*7c478bd9Sstevel@tonic-gate hci1394_q_fini(hci1394_q_handle_t *q_handle)
235*7c478bd9Sstevel@tonic-gate {
236*7c478bd9Sstevel@tonic-gate 	hci1394_q_t *q;
237*7c478bd9Sstevel@tonic-gate 
238*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
239*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_fini_enter, HCI1394_TNF_HAL_STACK, "");
240*7c478bd9Sstevel@tonic-gate 
241*7c478bd9Sstevel@tonic-gate 	q = *q_handle;
242*7c478bd9Sstevel@tonic-gate 	if (q->q_info.qi_mode == HCI1394_ATQ) {
243*7c478bd9Sstevel@tonic-gate 		hci1394_tlist_fini(&q->q_queued_list);
244*7c478bd9Sstevel@tonic-gate 	}
245*7c478bd9Sstevel@tonic-gate 	mutex_destroy(&q->q_mutex);
246*7c478bd9Sstevel@tonic-gate 	hci1394_buf_free(&q->q_desc.qb_buf_handle);
247*7c478bd9Sstevel@tonic-gate 	hci1394_buf_free(&q->q_data.qb_buf_handle);
248*7c478bd9Sstevel@tonic-gate 	kmem_free(q, sizeof (hci1394_q_t));
249*7c478bd9Sstevel@tonic-gate 	*q_handle = NULL;
250*7c478bd9Sstevel@tonic-gate 
251*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_fini_exit, HCI1394_TNF_HAL_STACK, "");
252*7c478bd9Sstevel@tonic-gate }
253*7c478bd9Sstevel@tonic-gate 
254*7c478bd9Sstevel@tonic-gate 
255*7c478bd9Sstevel@tonic-gate /*
256*7c478bd9Sstevel@tonic-gate  * hci1394_q_buf_setup()
257*7c478bd9Sstevel@tonic-gate  *    Initialize the buffer pointers which are common to both the descriptor
258*7c478bd9Sstevel@tonic-gate  *    buffer and the data buffer (no reason to duplicate the code).
259*7c478bd9Sstevel@tonic-gate  */
260*7c478bd9Sstevel@tonic-gate static void
261*7c478bd9Sstevel@tonic-gate hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
262*7c478bd9Sstevel@tonic-gate {
263*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
264*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_enter, HCI1394_TNF_HAL_STACK, "");
265*7c478bd9Sstevel@tonic-gate 
266*7c478bd9Sstevel@tonic-gate 	/* start with the first cookie */
267*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current_buf = 0;
268*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
269*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
270*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
271*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
272*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_offset = 0;
273*7c478bd9Sstevel@tonic-gate 
274*7c478bd9Sstevel@tonic-gate 	/*
275*7c478bd9Sstevel@tonic-gate 	 * The free_buf and free pointers will change every time an ACK (of some
276*7c478bd9Sstevel@tonic-gate 	 * type) is processed.  Free is the last byte in the last cookie.
277*7c478bd9Sstevel@tonic-gate 	 */
278*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
279*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;
280*7c478bd9Sstevel@tonic-gate 
281*7c478bd9Sstevel@tonic-gate 	/*
282*7c478bd9Sstevel@tonic-gate 	 * Start with no space to write descriptors.  We first need to call
283*7c478bd9Sstevel@tonic-gate 	 * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
284*7c478bd9Sstevel@tonic-gate 	 */
285*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_resv_size = 0;
286*7c478bd9Sstevel@tonic-gate 
287*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_exit, HCI1394_TNF_HAL_STACK, "");
288*7c478bd9Sstevel@tonic-gate }
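
/*
 * Illustrative layout (a sketch) right after hci1394_q_buf_setup() for a
 * buffer with a single cookie:
 *
 *	qp_top/qp_begin/qp_current                qp_free/qp_end/qp_bottom
 *	   |                                                         |
 *	   v                                                         v
 *	   +---------------------------------------------------------+
 *	   |                        cookie 0                         |
 *	   +---------------------------------------------------------+
 *
 * qp_current moves forward as descriptors/data are written and qp_free
 * chases it as completed commands give their space back (see
 * hci1394_q_at_next()).
 */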
289*7c478bd9Sstevel@tonic-gate 
290*7c478bd9Sstevel@tonic-gate 
291*7c478bd9Sstevel@tonic-gate /*
292*7c478bd9Sstevel@tonic-gate  * hci1394_q_reset()
293*7c478bd9Sstevel@tonic-gate  *    Resets the buffers to an initial state.  This should be called during
294*7c478bd9Sstevel@tonic-gate  *    attach and resume.
295*7c478bd9Sstevel@tonic-gate  */
296*7c478bd9Sstevel@tonic-gate static void
297*7c478bd9Sstevel@tonic-gate hci1394_q_reset(hci1394_q_handle_t q_handle)
298*7c478bd9Sstevel@tonic-gate {
299*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *desc;
300*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *data;
301*7c478bd9Sstevel@tonic-gate 	int index;
302*7c478bd9Sstevel@tonic-gate 
303*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
304*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_reset_enter, HCI1394_TNF_HAL_STACK, "");
305*7c478bd9Sstevel@tonic-gate 
306*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
307*7c478bd9Sstevel@tonic-gate 	desc = &q_handle->q_desc;
308*7c478bd9Sstevel@tonic-gate 	data = &q_handle->q_data;
309*7c478bd9Sstevel@tonic-gate 
310*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_setup(desc);
311*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_setup(data);
312*7c478bd9Sstevel@tonic-gate 
313*7c478bd9Sstevel@tonic-gate 	/* DMA starts off stopped, no previous descriptor to link from */
314*7c478bd9Sstevel@tonic-gate 	q_handle->q_dma_running = B_FALSE;
315*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
316*7c478bd9Sstevel@tonic-gate 	q_handle->q_previous = NULL;
317*7c478bd9Sstevel@tonic-gate 
318*7c478bd9Sstevel@tonic-gate 	/* If this is an AR Q, set up IMs for the data buffers that we have */
319*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
320*7c478bd9Sstevel@tonic-gate 		/*
321*7c478bd9Sstevel@tonic-gate 		 * This points to where to find the first IM descriptor.  Since
322*7c478bd9Sstevel@tonic-gate 		 * we just reset the pointers in hci1394_q_buf_setup(), the
323*7c478bd9Sstevel@tonic-gate 		 * first IM we write below will be found at the top of the Q.
324*7c478bd9Sstevel@tonic-gate 		 */
325*7c478bd9Sstevel@tonic-gate 		q_handle->q_head = desc->qb_ptrs.qp_top;
326*7c478bd9Sstevel@tonic-gate 
327*7c478bd9Sstevel@tonic-gate 		for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
328*7c478bd9Sstevel@tonic-gate 			hci1394_q_ar_write_IM(q_handle, desc,
329*7c478bd9Sstevel@tonic-gate 			    data->qb_cookie[index].dmac_address,
330*7c478bd9Sstevel@tonic-gate 			    data->qb_cookie[index].dmac_size);
331*7c478bd9Sstevel@tonic-gate 		}
332*7c478bd9Sstevel@tonic-gate 
333*7c478bd9Sstevel@tonic-gate 		/*
334*7c478bd9Sstevel@tonic-gate 		 * The space left in the current IM is the size of the buffer.
335*7c478bd9Sstevel@tonic-gate 		 * The current buffer is the first buffer added to the AR Q.
336*7c478bd9Sstevel@tonic-gate 		 */
337*7c478bd9Sstevel@tonic-gate 		q_handle->q_space_left = data->qb_cookie[0].dmac_size;
338*7c478bd9Sstevel@tonic-gate 	}
339*7c478bd9Sstevel@tonic-gate 
340*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
341*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_reset_exit, HCI1394_TNF_HAL_STACK, "");
342*7c478bd9Sstevel@tonic-gate }
343*7c478bd9Sstevel@tonic-gate 
344*7c478bd9Sstevel@tonic-gate 
345*7c478bd9Sstevel@tonic-gate /*
346*7c478bd9Sstevel@tonic-gate  * hci1394_q_resume()
347*7c478bd9Sstevel@tonic-gate  *    This is called during a resume (after a successful suspend). Currently
348*7c478bd9Sstevel@tonic-gate  *    we only call reset.  Since this is not a time critical function, we will
349*7c478bd9Sstevel@tonic-gate  *    leave this as a separate function to increase readability.
350*7c478bd9Sstevel@tonic-gate  */
351*7c478bd9Sstevel@tonic-gate void
352*7c478bd9Sstevel@tonic-gate hci1394_q_resume(hci1394_q_handle_t q_handle)
353*7c478bd9Sstevel@tonic-gate {
354*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
355*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_resume_enter, HCI1394_TNF_HAL_STACK, "");
356*7c478bd9Sstevel@tonic-gate 	hci1394_q_reset(q_handle);
357*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_resume_exit, HCI1394_TNF_HAL_STACK, "");
358*7c478bd9Sstevel@tonic-gate }
359*7c478bd9Sstevel@tonic-gate 
360*7c478bd9Sstevel@tonic-gate 
361*7c478bd9Sstevel@tonic-gate /*
362*7c478bd9Sstevel@tonic-gate  * hci1394_q_stop()
363*7c478bd9Sstevel@tonic-gate  *    This call informs us that a DMA engine has been stopped.  It does not
364*7c478bd9Sstevel@tonic-gate  *    perform the actual stop. We need to know this so that when we add a
365*7c478bd9Sstevel@tonic-gate  *    new descriptor, we do a start instead of a wake.
366*7c478bd9Sstevel@tonic-gate  */
367*7c478bd9Sstevel@tonic-gate void
368*7c478bd9Sstevel@tonic-gate hci1394_q_stop(hci1394_q_handle_t q_handle)
369*7c478bd9Sstevel@tonic-gate {
370*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
371*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_stop_enter, HCI1394_TNF_HAL_STACK, "");
372*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
373*7c478bd9Sstevel@tonic-gate 	q_handle->q_dma_running = B_FALSE;
374*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
375*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_stop_exit, HCI1394_TNF_HAL_STACK, "");
376*7c478bd9Sstevel@tonic-gate }
377*7c478bd9Sstevel@tonic-gate 
378*7c478bd9Sstevel@tonic-gate 
379*7c478bd9Sstevel@tonic-gate /*
380*7c478bd9Sstevel@tonic-gate  * hci1394_q_reserve()
381*7c478bd9Sstevel@tonic-gate  *    Reserve space in the AT descriptor or data buffer. This ensures that we
382*7c478bd9Sstevel@tonic-gate  *    can get a contiguous buffer. Descriptors have to be in a contiguous
383*7c478bd9Sstevel@tonic-gate  *    buffer. Data does not have to be in a contiguous buffer but we do this to
384*7c478bd9Sstevel@tonic-gate  *    reduce complexity. For systems with small page sizes (e.g. x86), this
385*7c478bd9Sstevel@tonic-gate  *    could result in inefficient use of the data buffers when sending large
386*7c478bd9Sstevel@tonic-gate  *    data blocks (this only applies to non-physical block write ATREQs and
387*7c478bd9Sstevel@tonic-gate  *    block read ATRESP). Since it looks like most protocols that use large data
388*7c478bd9Sstevel@tonic-gate  *    blocks (like SBP-2) use physical transfers to do this (due to their
389*7c478bd9Sstevel@tonic-gate  *    efficiency), this will probably not be a real world problem.  If it turns
390*7c478bd9Sstevel@tonic-gate  *    out to be a problem, the options are to force a single cookie for the data
391*7c478bd9Sstevel@tonic-gate  *    buffer, allow multiple cookies and have a larger data space, or change the
392*7c478bd9Sstevel@tonic-gate  *    data code to use an OMI, OM, OL descriptor sequence (instead of OMI, OL).
393*7c478bd9Sstevel@tonic-gate  */
394*7c478bd9Sstevel@tonic-gate static int
395*7c478bd9Sstevel@tonic-gate hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
396*7c478bd9Sstevel@tonic-gate {
397*7c478bd9Sstevel@tonic-gate 	uint_t aligned_size;
398*7c478bd9Sstevel@tonic-gate 
399*7c478bd9Sstevel@tonic-gate 
400*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
401*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_reserve_enter, HCI1394_TNF_HAL_STACK, "");
402*7c478bd9Sstevel@tonic-gate 
403*7c478bd9Sstevel@tonic-gate 	/* Save backup of pointers in case we have to unreserve */
404*7c478bd9Sstevel@tonic-gate 	qbuf->qb_backup_ptrs = qbuf->qb_ptrs;
405*7c478bd9Sstevel@tonic-gate 
406*7c478bd9Sstevel@tonic-gate 	/*
407*7c478bd9Sstevel@tonic-gate 	 * Make sure all allocs are quadlet aligned.  The data doesn't have to
408*7c478bd9Sstevel@tonic-gate 	 * be, but we will force it to be anyway.
409*7c478bd9Sstevel@tonic-gate 	 */
410*7c478bd9Sstevel@tonic-gate 	aligned_size = HCI1394_ALIGN_QUAD(size);
411*7c478bd9Sstevel@tonic-gate 
412*7c478bd9Sstevel@tonic-gate 	/*
413*7c478bd9Sstevel@tonic-gate 	 * if the free pointer is in the current buffer and the free pointer
414*7c478bd9Sstevel@tonic-gate 	 * is below the current pointer (i.e. has not wrapped around)
415*7c478bd9Sstevel@tonic-gate 	 */
416*7c478bd9Sstevel@tonic-gate 	if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
417*7c478bd9Sstevel@tonic-gate 	    (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
418*7c478bd9Sstevel@tonic-gate 		/*
419*7c478bd9Sstevel@tonic-gate 		 * The free pointer is in this buffer below the current pointer.
420*7c478bd9Sstevel@tonic-gate 		 * Check to see if we have enough free space left.
421*7c478bd9Sstevel@tonic-gate 		 */
422*7c478bd9Sstevel@tonic-gate 		if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
423*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_ptrs.qp_free) {
424*7c478bd9Sstevel@tonic-gate 			/* Set up our reserved size, return the IO address */
425*7c478bd9Sstevel@tonic-gate 			qbuf->qb_ptrs.qp_resv_size = aligned_size;
426*7c478bd9Sstevel@tonic-gate 			*io_addr = (uint32_t)(qbuf->qb_cookie[
427*7c478bd9Sstevel@tonic-gate 			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
428*7c478bd9Sstevel@tonic-gate 			    qbuf->qb_ptrs.qp_offset);
429*7c478bd9Sstevel@tonic-gate 
430*7c478bd9Sstevel@tonic-gate 		/*
431*7c478bd9Sstevel@tonic-gate 		 * The free pointer is in this buffer below the current pointer.
432*7c478bd9Sstevel@tonic-gate 		 * We do not have enough free space for the alloc. Return
433*7c478bd9Sstevel@tonic-gate 		 * failure.
434*7c478bd9Sstevel@tonic-gate 		 */
435*7c478bd9Sstevel@tonic-gate 		} else {
436*7c478bd9Sstevel@tonic-gate 			qbuf->qb_ptrs.qp_resv_size = 0;
437*7c478bd9Sstevel@tonic-gate 			TNF_PROBE_0(hci1394_q_reserve_ns_fail,
438*7c478bd9Sstevel@tonic-gate 			    HCI1394_TNF_HAL_ERROR, "");
439*7c478bd9Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
440*7c478bd9Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK, "");
441*7c478bd9Sstevel@tonic-gate 			return (DDI_FAILURE);
442*7c478bd9Sstevel@tonic-gate 		}
443*7c478bd9Sstevel@tonic-gate 
444*7c478bd9Sstevel@tonic-gate 	/*
445*7c478bd9Sstevel@tonic-gate 	 * If there is not enough room to fit in the current buffer (not
446*7c478bd9Sstevel@tonic-gate 	 * including wrap around), we will go to the next buffer and check
447*7c478bd9Sstevel@tonic-gate 	 * there. If we only have one buffer (i.e. one cookie), we will end up
448*7c478bd9Sstevel@tonic-gate 	 * staying at the current buffer and wrapping the address back to the
449*7c478bd9Sstevel@tonic-gate 	 * top.
450*7c478bd9Sstevel@tonic-gate 	 */
451*7c478bd9Sstevel@tonic-gate 	} else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
452*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_ptrs.qp_end) {
453*7c478bd9Sstevel@tonic-gate 		/* Go to the next buffer (or the top of ours for one cookie) */
454*7c478bd9Sstevel@tonic-gate 		hci1394_q_next_buf(qbuf);
455*7c478bd9Sstevel@tonic-gate 
456*7c478bd9Sstevel@tonic-gate 		/* If the free pointer is in the new current buffer */
457*7c478bd9Sstevel@tonic-gate 		if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
458*7c478bd9Sstevel@tonic-gate 			/*
459*7c478bd9Sstevel@tonic-gate 			 * The free pointer is in this buffer.  If we do not have
460*7c478bd9Sstevel@tonic-gate 			 * enough free space for the alloc, return failure.
461*7c478bd9Sstevel@tonic-gate 			 */
462*7c478bd9Sstevel@tonic-gate 			if ((qbuf->qb_ptrs.qp_current + aligned_size) >
463*7c478bd9Sstevel@tonic-gate 			    qbuf->qb_ptrs.qp_free) {
464*7c478bd9Sstevel@tonic-gate 				qbuf->qb_ptrs.qp_resv_size = 0;
465*7c478bd9Sstevel@tonic-gate 				TNF_PROBE_0(hci1394_q_reserve_ns_fail,
466*7c478bd9Sstevel@tonic-gate 				    HCI1394_TNF_HAL_ERROR, "");
467*7c478bd9Sstevel@tonic-gate 				TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
468*7c478bd9Sstevel@tonic-gate 				    HCI1394_TNF_HAL_STACK, "");
469*7c478bd9Sstevel@tonic-gate 				return (DDI_FAILURE);
470*7c478bd9Sstevel@tonic-gate 			/*
471*7c478bd9Sstevel@tonic-gate 			 * The free pointer is in this buffer. We have enough
472*7c478bd9Sstevel@tonic-gate 			 * free space left.
473*7c478bd9Sstevel@tonic-gate 			 */
474*7c478bd9Sstevel@tonic-gate 			} else {
475*7c478bd9Sstevel@tonic-gate 				/*
476*7c478bd9Sstevel@tonic-gate 				 * Set up our reserved size, return the IO
477*7c478bd9Sstevel@tonic-gate 				 * address
478*7c478bd9Sstevel@tonic-gate 				 */
479*7c478bd9Sstevel@tonic-gate 				qbuf->qb_ptrs.qp_resv_size = aligned_size;
480*7c478bd9Sstevel@tonic-gate 				*io_addr = (uint32_t)(qbuf->qb_cookie[
481*7c478bd9Sstevel@tonic-gate 				    qbuf->qb_ptrs.qp_current_buf].dmac_address +
482*7c478bd9Sstevel@tonic-gate 				    qbuf->qb_ptrs.qp_offset);
483*7c478bd9Sstevel@tonic-gate 			}
484*7c478bd9Sstevel@tonic-gate 
485*7c478bd9Sstevel@tonic-gate 		/*
486*7c478bd9Sstevel@tonic-gate 		 * We switched buffers and the free pointer is still in another
487*7c478bd9Sstevel@tonic-gate 		 * buffer. We have sufficient space in this buffer for the alloc
488*7c478bd9Sstevel@tonic-gate 		 * after changing buffers.
489*7c478bd9Sstevel@tonic-gate 		 */
490*7c478bd9Sstevel@tonic-gate 		} else {
491*7c478bd9Sstevel@tonic-gate 			/* Set up our reserved size, return the IO address */
492*7c478bd9Sstevel@tonic-gate 			qbuf->qb_ptrs.qp_resv_size = aligned_size;
493*7c478bd9Sstevel@tonic-gate 			*io_addr = (uint32_t)(qbuf->qb_cookie[
494*7c478bd9Sstevel@tonic-gate 			    qbuf->qb_ptrs.qp_current_buf].dmac_address +
495*7c478bd9Sstevel@tonic-gate 			    qbuf->qb_ptrs.qp_offset);
496*7c478bd9Sstevel@tonic-gate 		}
497*7c478bd9Sstevel@tonic-gate 	/*
498*7c478bd9Sstevel@tonic-gate 	 * The free pointer is in another buffer. We have sufficient space in
499*7c478bd9Sstevel@tonic-gate 	 * this buffer for the alloc.
500*7c478bd9Sstevel@tonic-gate 	 */
501*7c478bd9Sstevel@tonic-gate 	} else {
502*7c478bd9Sstevel@tonic-gate 		/* Set up our reserved size, return the IO address */
503*7c478bd9Sstevel@tonic-gate 		qbuf->qb_ptrs.qp_resv_size = aligned_size;
504*7c478bd9Sstevel@tonic-gate 		*io_addr = (uint32_t)(qbuf->qb_cookie[
505*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_ptrs.qp_current_buf].dmac_address +
506*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_ptrs.qp_offset);
507*7c478bd9Sstevel@tonic-gate 	}
508*7c478bd9Sstevel@tonic-gate 
509*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit, HCI1394_TNF_HAL_STACK, "");
510*7c478bd9Sstevel@tonic-gate 
511*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
512*7c478bd9Sstevel@tonic-gate }
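
/*
 * Worked example (hypothetical numbers): a request to reserve 13 bytes is
 * rounded up to 16 bytes by HCI1394_ALIGN_QUAD().  If qp_free has already
 * wrapped into another cookie and qp_current sits at offset 0x200 of a
 * cookie whose dmac_address is 0x1F40000, the 16 bytes are reserved and
 * *io_addr comes back as 0x1F40200.
 */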
513*7c478bd9Sstevel@tonic-gate 
514*7c478bd9Sstevel@tonic-gate /*
515*7c478bd9Sstevel@tonic-gate  * hci1394_q_unreserve()
516*7c478bd9Sstevel@tonic-gate  *    Set the buffer pointers back to what they were before hci1394_q_reserve().
517*7c478bd9Sstevel@tonic-gate  *    This will be called when we encounter errors during hci1394_q_at*().
518*7c478bd9Sstevel@tonic-gate  */
519*7c478bd9Sstevel@tonic-gate static void
520*7c478bd9Sstevel@tonic-gate hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
521*7c478bd9Sstevel@tonic-gate {
522*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
523*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_unreserve_enter, HCI1394_TNF_HAL_STACK, "");
524*7c478bd9Sstevel@tonic-gate 
525*7c478bd9Sstevel@tonic-gate 	/* Go back to pointer setting before the reserve */
526*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
527*7c478bd9Sstevel@tonic-gate 
528*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_unreserve_exit, HCI1394_TNF_HAL_STACK, "");
529*7c478bd9Sstevel@tonic-gate }
530*7c478bd9Sstevel@tonic-gate 
531*7c478bd9Sstevel@tonic-gate 
532*7c478bd9Sstevel@tonic-gate /*
533*7c478bd9Sstevel@tonic-gate  * hci1394_q_next_buf()
534*7c478bd9Sstevel@tonic-gate  *    Set our current buffer to the next cookie.  If we only have one cookie, we
535*7c478bd9Sstevel@tonic-gate  *    will go back to the top of our buffer.
536*7c478bd9Sstevel@tonic-gate  */
537*7c478bd9Sstevel@tonic-gate void
538*7c478bd9Sstevel@tonic-gate hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
539*7c478bd9Sstevel@tonic-gate {
540*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
541*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_next_buf_enter, HCI1394_TNF_HAL_STACK, "");
542*7c478bd9Sstevel@tonic-gate 
543*7c478bd9Sstevel@tonic-gate 	/*
544*7c478bd9Sstevel@tonic-gate 	 * go to the next cookie.  If we are >= the cookie count, go back to the
545*7c478bd9Sstevel@tonic-gate 	 * first cookie.
546*7c478bd9Sstevel@tonic-gate 	 */
547*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current_buf++;
548*7c478bd9Sstevel@tonic-gate 	if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
549*7c478bd9Sstevel@tonic-gate 		qbuf->qb_ptrs.qp_current_buf = 0;
550*7c478bd9Sstevel@tonic-gate 	}
551*7c478bd9Sstevel@tonic-gate 
552*7c478bd9Sstevel@tonic-gate 	/* adjust the begin, end, current, and offset pointers */
553*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
554*7c478bd9Sstevel@tonic-gate 	if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
555*7c478bd9Sstevel@tonic-gate 		qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
556*7c478bd9Sstevel@tonic-gate 	}
557*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
558*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
559*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
560*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_offset = 0;
561*7c478bd9Sstevel@tonic-gate 
562*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_next_buf_exit, HCI1394_TNF_HAL_STACK, "");
563*7c478bd9Sstevel@tonic-gate }
564*7c478bd9Sstevel@tonic-gate 
565*7c478bd9Sstevel@tonic-gate 
566*7c478bd9Sstevel@tonic-gate /*
567*7c478bd9Sstevel@tonic-gate  * hci1394_q_at()
568*7c478bd9Sstevel@tonic-gate  *    Place an AT command that does NOT need the data buffer into the DMA chain.
569*7c478bd9Sstevel@tonic-gate  *    Some examples of this are quadlet read/write, PHY packets, ATREQ block
570*7c478bd9Sstevel@tonic-gate  *    read, and ATRESP block write.  result is only valid on failure.
571*7c478bd9Sstevel@tonic-gate  */
572*7c478bd9Sstevel@tonic-gate int
573*7c478bd9Sstevel@tonic-gate hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
574*7c478bd9Sstevel@tonic-gate     hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
575*7c478bd9Sstevel@tonic-gate {
576*7c478bd9Sstevel@tonic-gate 	int status;
577*7c478bd9Sstevel@tonic-gate 	uint32_t ioaddr;
578*7c478bd9Sstevel@tonic-gate 
579*7c478bd9Sstevel@tonic-gate 
580*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
581*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
582*7c478bd9Sstevel@tonic-gate 	ASSERT(hdr != NULL);
583*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_enter, HCI1394_TNF_HAL_STACK, "");
584*7c478bd9Sstevel@tonic-gate 
585*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
586*7c478bd9Sstevel@tonic-gate 
587*7c478bd9Sstevel@tonic-gate 	/*
588*7c478bd9Sstevel@tonic-gate 	 * Check the HAL state and generation when the AT Q is locked.  This
589*7c478bd9Sstevel@tonic-gate 	 * will make sure that we get all the commands when we flush the Q's
590*7c478bd9Sstevel@tonic-gate 	 * during a reset or shutdown.
591*7c478bd9Sstevel@tonic-gate 	 */
592*7c478bd9Sstevel@tonic-gate 	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
593*7c478bd9Sstevel@tonic-gate 	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
594*7c478bd9Sstevel@tonic-gate 	    cmd->qc_generation)) {
595*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_INVALID_BUSGEN;
596*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
597*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_st_fail, HCI1394_TNF_HAL_ERROR, "");
598*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
599*7c478bd9Sstevel@tonic-gate 		    "");
600*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
601*7c478bd9Sstevel@tonic-gate 	}
602*7c478bd9Sstevel@tonic-gate 
603*7c478bd9Sstevel@tonic-gate 	/* save away the argument to pass up when this command completes */
604*7c478bd9Sstevel@tonic-gate 	cmd->qc_node.tln_addr = cmd;
605*7c478bd9Sstevel@tonic-gate 
606*7c478bd9Sstevel@tonic-gate 	/* we have not written any 16 byte blocks to the descriptor yet */
607*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
608*7c478bd9Sstevel@tonic-gate 
609*7c478bd9Sstevel@tonic-gate 	/* Reserve space for an OLI in the descriptor buffer */
610*7c478bd9Sstevel@tonic-gate 	status = hci1394_q_reserve(&q_handle->q_desc,
611*7c478bd9Sstevel@tonic-gate 	    sizeof (hci1394_desc_imm_t), &ioaddr);
612*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
613*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_NOMORE_SPACE;
614*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
615*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_qre_fail, HCI1394_TNF_HAL_ERROR, "");
616*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
617*7c478bd9Sstevel@tonic-gate 		    "");
618*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
619*7c478bd9Sstevel@tonic-gate 	}
620*7c478bd9Sstevel@tonic-gate 
621*7c478bd9Sstevel@tonic-gate 	/* write the OLI to the descriptor buffer */
622*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
623*7c478bd9Sstevel@tonic-gate 
624*7c478bd9Sstevel@tonic-gate 	/* Add the AT command to the queued list */
625*7c478bd9Sstevel@tonic-gate 	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
626*7c478bd9Sstevel@tonic-gate 
627*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
628*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK, "");
629*7c478bd9Sstevel@tonic-gate 
630*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
631*7c478bd9Sstevel@tonic-gate }
632*7c478bd9Sstevel@tonic-gate 
633*7c478bd9Sstevel@tonic-gate 
634*7c478bd9Sstevel@tonic-gate /*
635*7c478bd9Sstevel@tonic-gate  * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
636*7c478bd9Sstevel@tonic-gate  *    ATREQ Block read and write's that go through software are not very
637*7c478bd9Sstevel@tonic-gate  *    efficient (one of the reasons to use physical space). A copy is forced
638*7c478bd9Sstevel@tonic-gate  *    on all block reads due to the design of OpenHCI. Writes do not have this
639*7c478bd9Sstevel@tonic-gate  *    same restriction.  This design forces a copy for writes too (we always
640*7c478bd9Sstevel@tonic-gate  *    copy into a data buffer before sending). There are many reasons for this
641*7c478bd9Sstevel@tonic-gate  *    including complexity reduction.  There is a data size threshold where a
642*7c478bd9Sstevel@tonic-gate  *    copy is more expensive than mapping the data buffer address (or, worse
643*7c478bd9Sstevel@tonic-gate  *    case, a big enough difference that it pays to do it).  However, we move
644*7c478bd9Sstevel@tonic-gate  *    block data around in mblks which means that our data may be scattered
645*7c478bd9Sstevel@tonic-gate  *    over many buffers.  This adds to the complexity of mapping and setting
646*7c478bd9Sstevel@tonic-gate  *    up the OpenHCI descriptors.
647*7c478bd9Sstevel@tonic-gate  *
648*7c478bd9Sstevel@tonic-gate  *    If someone really needs a speedup on block write ATREQs, my recommendation
649*7c478bd9Sstevel@tonic-gate  *    would be to add an additional command type at the target interface for a
650*7c478bd9Sstevel@tonic-gate  *    fast block write.  The target driver would pass a mapped io addr to use.
651*7c478bd9Sstevel@tonic-gate  *    A function like "hci1394_q_at_with_ioaddr()" could be created which would
652*7c478bd9Sstevel@tonic-gate  *    be almost an exact copy of hci1394_q_at_with_data() without the
653*7c478bd9Sstevel@tonic-gate  *    hci1394_q_reserve() and hci1394_q_at_rep_put8() for the data buffer.
654*7c478bd9Sstevel@tonic-gate  */
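
/*
 * A minimal sketch of the hypothetical interface described above; it is NOT
 * implemented in this driver:
 *
 *	int
 *	hci1394_q_at_with_ioaddr(hci1394_q_handle_t q_handle,
 *	    hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize,
 *	    uint32_t data_ioaddr, uint_t datasize, int *result);
 *
 * It would follow hci1394_q_at_with_data() below, except that the target
 * driver supplies data_ioaddr (an already mapped IO address), so the
 * hci1394_q_reserve()/hci1394_q_at_rep_put8() calls for the data buffer
 * would be skipped and data_ioaddr would be written into the OL descriptor
 * instead.
 */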
655*7c478bd9Sstevel@tonic-gate 
656*7c478bd9Sstevel@tonic-gate 
657*7c478bd9Sstevel@tonic-gate /*
658*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_with_data()
659*7c478bd9Sstevel@tonic-gate  *    Place an AT command that does need the data buffer into the DMA chain.
660*7c478bd9Sstevel@tonic-gate  *    The data is passed as a pointer to a kernel virtual address. An example of
661*7c478bd9Sstevel@tonic-gate  *    this is a lock operation.  result is only valid on failure.
662*7c478bd9Sstevel@tonic-gate  */
663*7c478bd9Sstevel@tonic-gate int
664*7c478bd9Sstevel@tonic-gate hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
665*7c478bd9Sstevel@tonic-gate     hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
666*7c478bd9Sstevel@tonic-gate     int *result)
667*7c478bd9Sstevel@tonic-gate {
668*7c478bd9Sstevel@tonic-gate 	uint32_t desc_ioaddr;
669*7c478bd9Sstevel@tonic-gate 	uint32_t data_ioaddr;
670*7c478bd9Sstevel@tonic-gate 	int status;
671*7c478bd9Sstevel@tonic-gate 
672*7c478bd9Sstevel@tonic-gate 
673*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
674*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
675*7c478bd9Sstevel@tonic-gate 	ASSERT(hdr != NULL);
676*7c478bd9Sstevel@tonic-gate 	ASSERT(data != NULL);
677*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_enter, HCI1394_TNF_HAL_STACK,
678*7c478bd9Sstevel@tonic-gate 	    "");
679*7c478bd9Sstevel@tonic-gate 
680*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
681*7c478bd9Sstevel@tonic-gate 
682*7c478bd9Sstevel@tonic-gate 	/*
683*7c478bd9Sstevel@tonic-gate 	 * Check the HAL state and generation when the AT Q is locked.  This
684*7c478bd9Sstevel@tonic-gate 	 * will make sure that we get all the commands when we flush the Q's
685*7c478bd9Sstevel@tonic-gate 	 * during a reset or shutdown.
686*7c478bd9Sstevel@tonic-gate 	 */
687*7c478bd9Sstevel@tonic-gate 	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
688*7c478bd9Sstevel@tonic-gate 	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
689*7c478bd9Sstevel@tonic-gate 	    cmd->qc_generation)) {
690*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_INVALID_BUSGEN;
691*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
692*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_wd_st_fail,
693*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
694*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
695*7c478bd9Sstevel@tonic-gate 	}
696*7c478bd9Sstevel@tonic-gate 
697*7c478bd9Sstevel@tonic-gate 	/* save away the argument to pass up when this command completes */
698*7c478bd9Sstevel@tonic-gate 	cmd->qc_node.tln_addr = cmd;
699*7c478bd9Sstevel@tonic-gate 
700*7c478bd9Sstevel@tonic-gate 	/* we have not written any 16 byte blocks to the descriptor yet */
701*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
702*7c478bd9Sstevel@tonic-gate 
703*7c478bd9Sstevel@tonic-gate 	/* Reserve space for an OMI and OL in the descriptor buffer */
704*7c478bd9Sstevel@tonic-gate 	status = hci1394_q_reserve(&q_handle->q_desc,
705*7c478bd9Sstevel@tonic-gate 	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
706*7c478bd9Sstevel@tonic-gate 	    &desc_ioaddr);
707*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
708*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_NOMORE_SPACE;
709*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
710*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_wd_qre_fail,
711*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR, "");
712*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
713*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
714*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
715*7c478bd9Sstevel@tonic-gate 	}
716*7c478bd9Sstevel@tonic-gate 
717*7c478bd9Sstevel@tonic-gate 	/* allocate space for data in the data buffer */
718*7c478bd9Sstevel@tonic-gate 	status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
719*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
720*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_NOMORE_SPACE;
721*7c478bd9Sstevel@tonic-gate 		hci1394_q_unreserve(&q_handle->q_desc);
722*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
723*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_wd_qra_fail,
724*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR, "");
725*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
726*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
727*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
728*7c478bd9Sstevel@tonic-gate 	}
729*7c478bd9Sstevel@tonic-gate 
730*7c478bd9Sstevel@tonic-gate 	/* Copy data into data buffer */
731*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);
732*7c478bd9Sstevel@tonic-gate 
733*7c478bd9Sstevel@tonic-gate 	/* write the OMI to the descriptor buffer */
734*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
735*7c478bd9Sstevel@tonic-gate 
736*7c478bd9Sstevel@tonic-gate 	/* write the OL to the descriptor buffer */
737*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
738*7c478bd9Sstevel@tonic-gate 	    datasize);
739*7c478bd9Sstevel@tonic-gate 
740*7c478bd9Sstevel@tonic-gate 	/* Add the AT command to the queued list */
741*7c478bd9Sstevel@tonic-gate 	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
742*7c478bd9Sstevel@tonic-gate 
743*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
744*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit, HCI1394_TNF_HAL_STACK,
745*7c478bd9Sstevel@tonic-gate 	    "");
746*7c478bd9Sstevel@tonic-gate 
747*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
748*7c478bd9Sstevel@tonic-gate }
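
/*
 * Illustrative call (a sketch, values hypothetical): sending an 8 byte lock
 * payload along with a prebuilt packet header.
 *
 *	uint8_t payload[8];
 *	int result;
 *
 *	(build hdr/hdrsize for the lock request and fill in payload)
 *	if (hci1394_q_at_with_data(q, cmd, &hdr, hdrsize, payload,
 *	    sizeof (payload), &result) != DDI_SUCCESS) {
 *		(result says why, e.g. H1394_STATUS_NOMORE_SPACE or
 *		 H1394_STATUS_INVALID_BUSGEN)
 *	}
 */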
749*7c478bd9Sstevel@tonic-gate 
750*7c478bd9Sstevel@tonic-gate 
751*7c478bd9Sstevel@tonic-gate /*
752*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_with_mblk()
753*7c478bd9Sstevel@tonic-gate  *    Place an AT command that does need the data buffer into the DMA chain.
754*7c478bd9Sstevel@tonic-gate  *    The data is passed in mblk_t(s). Examples of this are a block write
755*7c478bd9Sstevel@tonic-gate  *    ATREQ and a block read ATRESP. The services layer and the HAL use a
756*7c478bd9Sstevel@tonic-gate  *    private structure (h1394_mblk_t) to keep track of how much of the mblk
757*7c478bd9Sstevel@tonic-gate  *    to send since we may have to break the transfer up into smaller blocks.
758*7c478bd9Sstevel@tonic-gate  *    (i.e. a 1MByte block write would go out in 2KByte chunks).  result is only
759*7c478bd9Sstevel@tonic-gate  *    valid on failure.
760*7c478bd9Sstevel@tonic-gate  */
761*7c478bd9Sstevel@tonic-gate int
762*7c478bd9Sstevel@tonic-gate hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
763*7c478bd9Sstevel@tonic-gate     hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
764*7c478bd9Sstevel@tonic-gate {
765*7c478bd9Sstevel@tonic-gate 	uint32_t desc_ioaddr;
766*7c478bd9Sstevel@tonic-gate 	uint32_t data_ioaddr;
767*7c478bd9Sstevel@tonic-gate 	int status;
768*7c478bd9Sstevel@tonic-gate 
769*7c478bd9Sstevel@tonic-gate 
770*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
771*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
772*7c478bd9Sstevel@tonic-gate 	ASSERT(hdr != NULL);
773*7c478bd9Sstevel@tonic-gate 	ASSERT(mblk != NULL);
774*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_enter, HCI1394_TNF_HAL_STACK,
775*7c478bd9Sstevel@tonic-gate 	    "");
776*7c478bd9Sstevel@tonic-gate 
777*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
778*7c478bd9Sstevel@tonic-gate 
779*7c478bd9Sstevel@tonic-gate 	/*
780*7c478bd9Sstevel@tonic-gate 	 * Check the HAL state and generation when the AT Q is locked.  This
781*7c478bd9Sstevel@tonic-gate 	 * will make sure that we get all the commands when we flush the Q's
782*7c478bd9Sstevel@tonic-gate 	 * during a reset or shutdown.
783*7c478bd9Sstevel@tonic-gate 	 */
784*7c478bd9Sstevel@tonic-gate 	if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
785*7c478bd9Sstevel@tonic-gate 	    (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
786*7c478bd9Sstevel@tonic-gate 	    cmd->qc_generation)) {
787*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_INVALID_BUSGEN;
788*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
789*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_wm_st_fail,
790*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
791*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
792*7c478bd9Sstevel@tonic-gate 	}
793*7c478bd9Sstevel@tonic-gate 
794*7c478bd9Sstevel@tonic-gate 	/* save away the argument to pass up when this command completes */
795*7c478bd9Sstevel@tonic-gate 	cmd->qc_node.tln_addr = cmd;
796*7c478bd9Sstevel@tonic-gate 
797*7c478bd9Sstevel@tonic-gate 	/* we have not written any 16 byte blocks to the descriptor yet */
798*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
799*7c478bd9Sstevel@tonic-gate 
800*7c478bd9Sstevel@tonic-gate 	/* Reserve space for an OMI and OL in the descriptor buffer */
801*7c478bd9Sstevel@tonic-gate 	status = hci1394_q_reserve(&q_handle->q_desc,
802*7c478bd9Sstevel@tonic-gate 	    (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
803*7c478bd9Sstevel@tonic-gate 	    &desc_ioaddr);
804*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
805*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_NOMORE_SPACE;
806*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
807*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_wm_qre_fail,
808*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR, "");
809*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
810*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
811*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
812*7c478bd9Sstevel@tonic-gate 	}
813*7c478bd9Sstevel@tonic-gate 
814*7c478bd9Sstevel@tonic-gate 	/* Reserve space for data in the data buffer */
815*7c478bd9Sstevel@tonic-gate 	status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
816*7c478bd9Sstevel@tonic-gate 	    &data_ioaddr);
817*7c478bd9Sstevel@tonic-gate 	if (status != DDI_SUCCESS) {
818*7c478bd9Sstevel@tonic-gate 		*result = H1394_STATUS_NOMORE_SPACE;
819*7c478bd9Sstevel@tonic-gate 		hci1394_q_unreserve(&q_handle->q_desc);
820*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
821*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0(hci1394_q_at_wm_qra_fail,
822*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_ERROR, "");
823*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
824*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
825*7c478bd9Sstevel@tonic-gate 		return (DDI_FAILURE);
826*7c478bd9Sstevel@tonic-gate 	}
827*7c478bd9Sstevel@tonic-gate 
828*7c478bd9Sstevel@tonic-gate 	/* Copy mblk data into data buffer */
829*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);
830*7c478bd9Sstevel@tonic-gate 
831*7c478bd9Sstevel@tonic-gate 	/* write the OMI to the descriptor buffer */
832*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
833*7c478bd9Sstevel@tonic-gate 
834*7c478bd9Sstevel@tonic-gate 	/* write the OL to the descriptor buffer */
835*7c478bd9Sstevel@tonic-gate 	hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
836*7c478bd9Sstevel@tonic-gate 	    mblk->length);
837*7c478bd9Sstevel@tonic-gate 
838*7c478bd9Sstevel@tonic-gate 	/* Add the AT command to the queued list */
839*7c478bd9Sstevel@tonic-gate 	hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
840*7c478bd9Sstevel@tonic-gate 
841*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
842*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit, HCI1394_TNF_HAL_STACK,
843*7c478bd9Sstevel@tonic-gate 	    "");
844*7c478bd9Sstevel@tonic-gate 
845*7c478bd9Sstevel@tonic-gate 	return (DDI_SUCCESS);
846*7c478bd9Sstevel@tonic-gate }
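
/*
 * Worked example from the comment above: a 1MByte block write sent out in
 * 2KByte chunks takes 1048576 / 2048 = 512 trips through this routine, with
 * mblk->length describing roughly 2KBytes each time and the h1394_mblk_t
 * bookkeeping tracking how far into the mblk chain the transfer has gotten.
 */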
847*7c478bd9Sstevel@tonic-gate 
848*7c478bd9Sstevel@tonic-gate 
849*7c478bd9Sstevel@tonic-gate /*
850*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_next()
851*7c478bd9Sstevel@tonic-gate  *    Return the next completed AT command in cmd.  If flush_q is true, we will
852*7c478bd9Sstevel@tonic-gate  *    return the command whether or not it has finished.  We will flush
853*7c478bd9Sstevel@tonic-gate  *    during bus reset processing, shutdown, and detach.
854*7c478bd9Sstevel@tonic-gate  */
855*7c478bd9Sstevel@tonic-gate void
856*7c478bd9Sstevel@tonic-gate hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
857*7c478bd9Sstevel@tonic-gate     hci1394_q_cmd_t **cmd)
858*7c478bd9Sstevel@tonic-gate {
859*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *desc;
860*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *data;
861*7c478bd9Sstevel@tonic-gate 	hci1394_tlist_node_t *node;
862*7c478bd9Sstevel@tonic-gate 	uint32_t cmd_status;
863*7c478bd9Sstevel@tonic-gate 
864*7c478bd9Sstevel@tonic-gate 
865*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
866*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
867*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_next_enter, HCI1394_TNF_HAL_STACK, "");
868*7c478bd9Sstevel@tonic-gate 
869*7c478bd9Sstevel@tonic-gate 	mutex_enter(&q_handle->q_mutex);
870*7c478bd9Sstevel@tonic-gate 
871*7c478bd9Sstevel@tonic-gate 	desc = &q_handle->q_desc;
872*7c478bd9Sstevel@tonic-gate 	data = &q_handle->q_data;
873*7c478bd9Sstevel@tonic-gate 
874*7c478bd9Sstevel@tonic-gate 	/* Sync descriptor buffer */
875*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
876*7c478bd9Sstevel@tonic-gate 	    desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
877*7c478bd9Sstevel@tonic-gate 
878*7c478bd9Sstevel@tonic-gate 	/* Look at the top cmd on the queued list (without removing it) */
879*7c478bd9Sstevel@tonic-gate 	hci1394_tlist_peek(q_handle->q_queued_list, &node);
880*7c478bd9Sstevel@tonic-gate 	if (node == NULL) {
881*7c478bd9Sstevel@tonic-gate 		/* There are no more commands left on the queued list */
882*7c478bd9Sstevel@tonic-gate 		*cmd = NULL;
883*7c478bd9Sstevel@tonic-gate 		mutex_exit(&q_handle->q_mutex);
884*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK,
885*7c478bd9Sstevel@tonic-gate 		    "");
886*7c478bd9Sstevel@tonic-gate 		return;
887*7c478bd9Sstevel@tonic-gate 	}
888*7c478bd9Sstevel@tonic-gate 
889*7c478bd9Sstevel@tonic-gate 	/*
890*7c478bd9Sstevel@tonic-gate 	 * There is a command on the list; read its status and the timestamp of
891*7c478bd9Sstevel@tonic-gate 	 * when it was sent.
892*7c478bd9Sstevel@tonic-gate 	 */
893*7c478bd9Sstevel@tonic-gate 	*cmd = (hci1394_q_cmd_t *)node->tln_addr;
894*7c478bd9Sstevel@tonic-gate 	cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
895*7c478bd9Sstevel@tonic-gate 	(*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
896*7c478bd9Sstevel@tonic-gate 	cmd_status = HCI1394_DESC_EVT_GET(cmd_status);
897*7c478bd9Sstevel@tonic-gate 
898*7c478bd9Sstevel@tonic-gate 	/*
899*7c478bd9Sstevel@tonic-gate 	 * If we are flushing the Q (e.g. due to a bus reset), we will return
900*7c478bd9Sstevel@tonic-gate 	 * the command regardless of its completion status. If we are not
901*7c478bd9Sstevel@tonic-gate 	 * flushing the Q and we do not have status on the command (e.g. status
902*7c478bd9Sstevel@tonic-gate 	 * = 0), we are done with this Q for now.
903*7c478bd9Sstevel@tonic-gate 	 */
904*7c478bd9Sstevel@tonic-gate 	if (flush_q == B_FALSE) {
905*7c478bd9Sstevel@tonic-gate 		if (cmd_status == 0) {
906*7c478bd9Sstevel@tonic-gate 			*cmd = NULL;
907*7c478bd9Sstevel@tonic-gate 			mutex_exit(&q_handle->q_mutex);
908*7c478bd9Sstevel@tonic-gate 			TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit,
909*7c478bd9Sstevel@tonic-gate 			    HCI1394_TNF_HAL_STACK, "");
910*7c478bd9Sstevel@tonic-gate 			return;
911*7c478bd9Sstevel@tonic-gate 		}
912*7c478bd9Sstevel@tonic-gate 	}
913*7c478bd9Sstevel@tonic-gate 
914*7c478bd9Sstevel@tonic-gate 	/*
915*7c478bd9Sstevel@tonic-gate 	 * The command completed; remove it from the queued list.  There is no
916*7c478bd9Sstevel@tonic-gate 	 * race condition in deleting the node here.  This is the only place
917*7c478bd9Sstevel@tonic-gate 	 * the node will be deleted, so we do not need to check the return
918*7c478bd9Sstevel@tonic-gate 	 * status.
919*7c478bd9Sstevel@tonic-gate 	 */
920*7c478bd9Sstevel@tonic-gate 	(void) hci1394_tlist_delete(q_handle->q_queued_list, node);
921*7c478bd9Sstevel@tonic-gate 
922*7c478bd9Sstevel@tonic-gate 	/*
923*7c478bd9Sstevel@tonic-gate 	 * Free the space used by the command in the descriptor and data
924*7c478bd9Sstevel@tonic-gate 	 * buffers.
925*7c478bd9Sstevel@tonic-gate 	 */
926*7c478bd9Sstevel@tonic-gate 	desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
927*7c478bd9Sstevel@tonic-gate 	desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
928*7c478bd9Sstevel@tonic-gate 	if ((*cmd)->qc_data_used == B_TRUE) {
929*7c478bd9Sstevel@tonic-gate 		data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
930*7c478bd9Sstevel@tonic-gate 		data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
931*7c478bd9Sstevel@tonic-gate 	}
932*7c478bd9Sstevel@tonic-gate 
933*7c478bd9Sstevel@tonic-gate 	/* return command status */
934*7c478bd9Sstevel@tonic-gate 	(*cmd)->qc_status = cmd_status;
935*7c478bd9Sstevel@tonic-gate 
936*7c478bd9Sstevel@tonic-gate 	mutex_exit(&q_handle->q_mutex);
937*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK, "");
938*7c478bd9Sstevel@tonic-gate }
939*7c478bd9Sstevel@tonic-gate 
940*7c478bd9Sstevel@tonic-gate 
941*7c478bd9Sstevel@tonic-gate /*
942*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_write_OMI()
943*7c478bd9Sstevel@tonic-gate  *    Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
944*7c478bd9Sstevel@tonic-gate  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
945*7c478bd9Sstevel@tonic-gate  *    the additional information attached to an immediate descriptor.
946*7c478bd9Sstevel@tonic-gate  */
947*7c478bd9Sstevel@tonic-gate void
948*7c478bd9Sstevel@tonic-gate hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
949*7c478bd9Sstevel@tonic-gate     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
950*7c478bd9Sstevel@tonic-gate {
951*7c478bd9Sstevel@tonic-gate 	hci1394_desc_imm_t *desc;
952*7c478bd9Sstevel@tonic-gate 	uint32_t data;
953*7c478bd9Sstevel@tonic-gate 
954*7c478bd9Sstevel@tonic-gate 
955*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
956*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
957*7c478bd9Sstevel@tonic-gate 	ASSERT(hdr != NULL);
958*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&q_handle->q_mutex));
959*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_enter, HCI1394_TNF_HAL_STACK,
960*7c478bd9Sstevel@tonic-gate 	    "");
961*7c478bd9Sstevel@tonic-gate 
962*7c478bd9Sstevel@tonic-gate 	/* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
963*7c478bd9Sstevel@tonic-gate 	ASSERT((hdrsize == 8) || (hdrsize == 16));
964*7c478bd9Sstevel@tonic-gate 
965*7c478bd9Sstevel@tonic-gate 	/* Make sure enough room for OMI */
966*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
967*7c478bd9Sstevel@tonic-gate 
968*7c478bd9Sstevel@tonic-gate 	/* Store the offset of the top of this descriptor block */
969*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
970*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_ptrs.qp_begin);
971*7c478bd9Sstevel@tonic-gate 
972*7c478bd9Sstevel@tonic-gate 	/* Setup OpenHCI OMI Header */
973*7c478bd9Sstevel@tonic-gate 	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
974*7c478bd9Sstevel@tonic-gate 	data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
975*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
976*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
977*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
978*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
979*7c478bd9Sstevel@tonic-gate 
980*7c478bd9Sstevel@tonic-gate 	/*
981*7c478bd9Sstevel@tonic-gate 	 * Copy in the 1394 header.  hdrsize is in bytes; convert it to a
982*7c478bd9Sstevel@tonic-gate 	 * 32-bit word (quadlet) count.
983*7c478bd9Sstevel@tonic-gate 	 */
984*7c478bd9Sstevel@tonic-gate 	ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
985*7c478bd9Sstevel@tonic-gate 	    hdrsize >> 2, DDI_DEV_AUTOINCR);
986*7c478bd9Sstevel@tonic-gate 
987*7c478bd9Sstevel@tonic-gate 	/*
988*7c478bd9Sstevel@tonic-gate 	 * We wrote two 16-byte blocks into the descriptor buffer; update the
989*7c478bd9Sstevel@tonic-gate 	 * count accordingly.  Update the reserved size and current pointer.
990*7c478bd9Sstevel@tonic-gate 	 */
991*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt += 2;
992*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
993*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
994*7c478bd9Sstevel@tonic-gate 
995*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_exit, HCI1394_TNF_HAL_STACK,
996*7c478bd9Sstevel@tonic-gate 	    "");
997*7c478bd9Sstevel@tonic-gate }
998*7c478bd9Sstevel@tonic-gate 
999*7c478bd9Sstevel@tonic-gate 
1000*7c478bd9Sstevel@tonic-gate /*
1001*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_write_OLI()
1002*7c478bd9Sstevel@tonic-gate  *    Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
1003*7c478bd9Sstevel@tonic-gate  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
1004*7c478bd9Sstevel@tonic-gate  *    the additional information attached to an immediate descriptor.
1005*7c478bd9Sstevel@tonic-gate  */
1006*7c478bd9Sstevel@tonic-gate void
1007*7c478bd9Sstevel@tonic-gate hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1008*7c478bd9Sstevel@tonic-gate     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
1009*7c478bd9Sstevel@tonic-gate {
1010*7c478bd9Sstevel@tonic-gate 	hci1394_desc_imm_t *desc;
1011*7c478bd9Sstevel@tonic-gate 	uint32_t data;
1012*7c478bd9Sstevel@tonic-gate 	uint32_t command_ptr;
1013*7c478bd9Sstevel@tonic-gate 	uint32_t tcode;
1014*7c478bd9Sstevel@tonic-gate 
1015*7c478bd9Sstevel@tonic-gate 
1016*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
1017*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
1018*7c478bd9Sstevel@tonic-gate 	ASSERT(hdr != NULL);
1019*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1020*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_enter, HCI1394_TNF_HAL_STACK,
1021*7c478bd9Sstevel@tonic-gate 	    "");
1022*7c478bd9Sstevel@tonic-gate 
1023*7c478bd9Sstevel@tonic-gate 	/* The only valid "header" sizes for an OLI are 8, 12, or 16 bytes */
1024*7c478bd9Sstevel@tonic-gate 	ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));
1025*7c478bd9Sstevel@tonic-gate 
1026*7c478bd9Sstevel@tonic-gate 	/* make sure enough room for 1 OLI */
1027*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
1028*7c478bd9Sstevel@tonic-gate 
1029*7c478bd9Sstevel@tonic-gate 	/* Store the offset of the top of this descriptor block */
1030*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1031*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_ptrs.qp_begin);
1032*7c478bd9Sstevel@tonic-gate 
1033*7c478bd9Sstevel@tonic-gate 	/* Setup OpenHCI OLI Header */
1034*7c478bd9Sstevel@tonic-gate 	desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
1035*7c478bd9Sstevel@tonic-gate 	data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
1036*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1037*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
1038*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1039*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
1040*7c478bd9Sstevel@tonic-gate 
1041*7c478bd9Sstevel@tonic-gate 	/* Setup 1394 Header */
1042*7c478bd9Sstevel@tonic-gate 	tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
1043*7c478bd9Sstevel@tonic-gate 	if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
1044*7c478bd9Sstevel@tonic-gate 	    (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
1045*7c478bd9Sstevel@tonic-gate 		/*
1046*7c478bd9Sstevel@tonic-gate 		 * if the tcode is a quadlet write, move the last quadlet as
1047*7c478bd9Sstevel@tonic-gate 		 * 8-bit data.  All data is treated as 8-bit data (even quadlet
1048*7c478bd9Sstevel@tonic-gate 		 * reads and writes). Therefore, target drivers MUST take that
1049*7c478bd9Sstevel@tonic-gate 		 * into consideration when accessing device registers.
1050*7c478bd9Sstevel@tonic-gate 		 */
1051*7c478bd9Sstevel@tonic-gate 		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
1052*7c478bd9Sstevel@tonic-gate 		    DDI_DEV_AUTOINCR);
1053*7c478bd9Sstevel@tonic-gate 		ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
1054*7c478bd9Sstevel@tonic-gate 		    (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
1055*7c478bd9Sstevel@tonic-gate 	} else {
1056*7c478bd9Sstevel@tonic-gate 		ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
1057*7c478bd9Sstevel@tonic-gate 		    hdrsize >> 2, DDI_DEV_AUTOINCR);
1058*7c478bd9Sstevel@tonic-gate 	}
1059*7c478bd9Sstevel@tonic-gate 
1060*7c478bd9Sstevel@tonic-gate 	/*
1061*7c478bd9Sstevel@tonic-gate 	 * We wrote two 16-byte blocks into the descriptor buffer; update the
1062*7c478bd9Sstevel@tonic-gate 	 * count accordingly.
1063*7c478bd9Sstevel@tonic-gate 	 */
1064*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt += 2;
1065*7c478bd9Sstevel@tonic-gate 
1066*7c478bd9Sstevel@tonic-gate 	/*
1067*7c478bd9Sstevel@tonic-gate 	 * Sync buffer in case DMA engine currently running. This must be done
1068*7c478bd9Sstevel@tonic-gate 	 * before writing the command pointer in the previous descriptor.
1069*7c478bd9Sstevel@tonic-gate 	 */
1070*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1071*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1072*7c478bd9Sstevel@tonic-gate 
1073*7c478bd9Sstevel@tonic-gate 	/* save away the status address for quick access in at_next() */
1074*7c478bd9Sstevel@tonic-gate 	cmd->qc_status_addr = &desc->status;
1075*7c478bd9Sstevel@tonic-gate 
1076*7c478bd9Sstevel@tonic-gate 	/*
1077*7c478bd9Sstevel@tonic-gate 	 * Set up the command pointer.  This tells the HW where to get the
1078*7c478bd9Sstevel@tonic-gate 	 * descriptor we just set up.  It includes the IO address along with
1079*7c478bd9Sstevel@tonic-gate 	 * a 4-bit count of 16-byte blocks.
1080*7c478bd9Sstevel@tonic-gate 	 */
1081*7c478bd9Sstevel@tonic-gate 	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1082*7c478bd9Sstevel@tonic-gate 	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1083*7c478bd9Sstevel@tonic-gate 	    DESC_Z_MASK));
1084*7c478bd9Sstevel@tonic-gate 
1085*7c478bd9Sstevel@tonic-gate 	/*
1086*7c478bd9Sstevel@tonic-gate 	 * if we previously set up a descriptor, add this new descriptor into
1087*7c478bd9Sstevel@tonic-gate 	 * the previous descriptor's "next" pointer.
1088*7c478bd9Sstevel@tonic-gate 	 */
1089*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_previous != NULL) {
1090*7c478bd9Sstevel@tonic-gate 		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1091*7c478bd9Sstevel@tonic-gate 		    command_ptr);
1092*7c478bd9Sstevel@tonic-gate 		/* Sync buffer again, this gets the command pointer */
1093*7c478bd9Sstevel@tonic-gate 		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1094*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1095*7c478bd9Sstevel@tonic-gate 	}
1096*7c478bd9Sstevel@tonic-gate 
1097*7c478bd9Sstevel@tonic-gate 	/*
1098*7c478bd9Sstevel@tonic-gate 	 * this is now the previous descriptor.  Update the current pointer,
1099*7c478bd9Sstevel@tonic-gate 	 * clear the block count and reserved size since this is the end of
1100*7c478bd9Sstevel@tonic-gate 	 * this command.
1101*7c478bd9Sstevel@tonic-gate 	 */
1102*7c478bd9Sstevel@tonic-gate 	q_handle->q_previous = (hci1394_desc_t *)desc;
1103*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
1104*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
1105*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_resv_size = 0;
1106*7c478bd9Sstevel@tonic-gate 
1107*7c478bd9Sstevel@tonic-gate 	/* save away cleanup info when we are done with the command */
1108*7c478bd9Sstevel@tonic-gate 	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1109*7c478bd9Sstevel@tonic-gate 	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
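	/*
	 * Note: hci1394_q_at_next() uses these (via qp_free_buf/qp_free) to
	 * reclaim the descriptor space once the command completes.
	 */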
1110*7c478bd9Sstevel@tonic-gate 
1111*7c478bd9Sstevel@tonic-gate 	/* If the DMA is not running, start it */
1112*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_dma_running == B_FALSE) {
1113*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1114*7c478bd9Sstevel@tonic-gate 		    command_ptr);
1115*7c478bd9Sstevel@tonic-gate 		q_handle->q_dma_running = B_TRUE;
1116*7c478bd9Sstevel@tonic-gate 	/* the DMA is running, wake it up */
1117*7c478bd9Sstevel@tonic-gate 	} else {
1118*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1119*7c478bd9Sstevel@tonic-gate 	}
1120*7c478bd9Sstevel@tonic-gate 
1121*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_exit, HCI1394_TNF_HAL_STACK,
1122*7c478bd9Sstevel@tonic-gate 	    "");
1123*7c478bd9Sstevel@tonic-gate }
1124*7c478bd9Sstevel@tonic-gate 
1125*7c478bd9Sstevel@tonic-gate 
1126*7c478bd9Sstevel@tonic-gate /*
1127*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_write_OL()
1128*7c478bd9Sstevel@tonic-gate  *    Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
1129*7c478bd9Sstevel@tonic-gate  *    Buffer state information is stored in cmd.  The IO address of the data
1130*7c478bd9Sstevel@tonic-gate  *    buffer is passed in io_addr.  Size is the size of the data to be
1131*7c478bd9Sstevel@tonic-gate  *    transferred.
1132*7c478bd9Sstevel@tonic-gate  */
1133*7c478bd9Sstevel@tonic-gate void
1134*7c478bd9Sstevel@tonic-gate hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1135*7c478bd9Sstevel@tonic-gate     hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
1136*7c478bd9Sstevel@tonic-gate {
1137*7c478bd9Sstevel@tonic-gate 	hci1394_desc_t *desc;
1138*7c478bd9Sstevel@tonic-gate 	uint32_t data;
1139*7c478bd9Sstevel@tonic-gate 	uint32_t command_ptr;
1140*7c478bd9Sstevel@tonic-gate 
1141*7c478bd9Sstevel@tonic-gate 
1142*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1143*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
1144*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
1145*7c478bd9Sstevel@tonic-gate 	ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1146*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_enter, HCI1394_TNF_HAL_STACK,
1147*7c478bd9Sstevel@tonic-gate 	    "");
1148*7c478bd9Sstevel@tonic-gate 
1149*7c478bd9Sstevel@tonic-gate 	/* make sure enough room for OL */
1150*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));
1151*7c478bd9Sstevel@tonic-gate 
1152*7c478bd9Sstevel@tonic-gate 	/* Setup OpenHCI OL Header */
1153*7c478bd9Sstevel@tonic-gate 	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1154*7c478bd9Sstevel@tonic-gate 	data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
1155*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1156*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1157*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1158*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);
1159*7c478bd9Sstevel@tonic-gate 
1160*7c478bd9Sstevel@tonic-gate 	/*
1161*7c478bd9Sstevel@tonic-gate 	 * We wrote one 16-byte block into the descriptor buffer; update the
1162*7c478bd9Sstevel@tonic-gate 	 * count accordingly.
1163*7c478bd9Sstevel@tonic-gate 	 */
1164*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt++;
1165*7c478bd9Sstevel@tonic-gate 
1166*7c478bd9Sstevel@tonic-gate 	/*
1167*7c478bd9Sstevel@tonic-gate 	 * Sync buffer in case DMA engine currently running. This must be done
1168*7c478bd9Sstevel@tonic-gate 	 * before writing the command pointer in the previous descriptor.
1169*7c478bd9Sstevel@tonic-gate 	 */
1170*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1171*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1172*7c478bd9Sstevel@tonic-gate 
1173*7c478bd9Sstevel@tonic-gate 	/* save away the status address for quick access in at_next() */
1174*7c478bd9Sstevel@tonic-gate 	cmd->qc_status_addr = &desc->status;
1175*7c478bd9Sstevel@tonic-gate 
1176*7c478bd9Sstevel@tonic-gate 	/*
1177*7c478bd9Sstevel@tonic-gate 	 * Set up the command pointer.  This tells the HW where to get the
1178*7c478bd9Sstevel@tonic-gate 	 * descriptor we just set up.  It includes the IO address along with
1179*7c478bd9Sstevel@tonic-gate 	 * a 4-bit count of 16-byte blocks.
1180*7c478bd9Sstevel@tonic-gate 	 */
1181*7c478bd9Sstevel@tonic-gate 	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1182*7c478bd9Sstevel@tonic-gate 	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1183*7c478bd9Sstevel@tonic-gate 	    DESC_Z_MASK));
1184*7c478bd9Sstevel@tonic-gate 
1185*7c478bd9Sstevel@tonic-gate 	/*
1186*7c478bd9Sstevel@tonic-gate 	 * if we previously set up a descriptor, add this new descriptor into
1187*7c478bd9Sstevel@tonic-gate 	 * the previous descriptor's "next" pointer.
1188*7c478bd9Sstevel@tonic-gate 	 */
1189*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_previous != NULL) {
1190*7c478bd9Sstevel@tonic-gate 		ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1191*7c478bd9Sstevel@tonic-gate 		    command_ptr);
1192*7c478bd9Sstevel@tonic-gate 		/* Sync buffer again, this gets the command pointer */
1193*7c478bd9Sstevel@tonic-gate 		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1194*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1195*7c478bd9Sstevel@tonic-gate 	}
1196*7c478bd9Sstevel@tonic-gate 
1197*7c478bd9Sstevel@tonic-gate 	/*
1198*7c478bd9Sstevel@tonic-gate 	 * this is now the previous descriptor.  Update the current pointer,
1199*7c478bd9Sstevel@tonic-gate 	 * clear the block count and reserved size since this is the end of
1200*7c478bd9Sstevel@tonic-gate 	 * this command.
1201*7c478bd9Sstevel@tonic-gate 	 */
1202*7c478bd9Sstevel@tonic-gate 	q_handle->q_previous = desc;
1203*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1204*7c478bd9Sstevel@tonic-gate 	q_handle->q_block_cnt = 0;
1205*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_resv_size = 0;
1206*7c478bd9Sstevel@tonic-gate 
1207*7c478bd9Sstevel@tonic-gate 	/* save away cleanup info when we are done with the command */
1208*7c478bd9Sstevel@tonic-gate 	cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1209*7c478bd9Sstevel@tonic-gate 	cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1210*7c478bd9Sstevel@tonic-gate 
1211*7c478bd9Sstevel@tonic-gate 	/* If the DMA is not running, start it */
1212*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_dma_running == B_FALSE) {
1213*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1214*7c478bd9Sstevel@tonic-gate 		    command_ptr);
1215*7c478bd9Sstevel@tonic-gate 		q_handle->q_dma_running = B_TRUE;
1216*7c478bd9Sstevel@tonic-gate 	/* the DMA is running, wake it up */
1217*7c478bd9Sstevel@tonic-gate 	} else {
1218*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1219*7c478bd9Sstevel@tonic-gate 	}
1220*7c478bd9Sstevel@tonic-gate 
1221*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_exit, HCI1394_TNF_HAL_STACK,
1222*7c478bd9Sstevel@tonic-gate 	    "");
1223*7c478bd9Sstevel@tonic-gate }
1224*7c478bd9Sstevel@tonic-gate 
1225*7c478bd9Sstevel@tonic-gate 
1226*7c478bd9Sstevel@tonic-gate /*
1227*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_rep_put8()
1228*7c478bd9Sstevel@tonic-gate  *    Copy a byte stream from a kernel virtual address (data) to an IO-mapped
1229*7c478bd9Sstevel@tonic-gate  *    data buffer (qbuf).  Copy datasize bytes.  State information for the
1230*7c478bd9Sstevel@tonic-gate  *    data buffer is kept in cmd.
1231*7c478bd9Sstevel@tonic-gate  */
1232*7c478bd9Sstevel@tonic-gate void
1233*7c478bd9Sstevel@tonic-gate hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1234*7c478bd9Sstevel@tonic-gate     uint8_t *data, uint_t datasize)
1235*7c478bd9Sstevel@tonic-gate {
1236*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
1237*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
1238*7c478bd9Sstevel@tonic-gate 	ASSERT(data != NULL);
1239*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_enter, HCI1394_TNF_HAL_STACK,
1240*7c478bd9Sstevel@tonic-gate 	    "");
1241*7c478bd9Sstevel@tonic-gate 
1242*7c478bd9Sstevel@tonic-gate 	/* Make sure enough room for data */
1243*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);
1244*7c478bd9Sstevel@tonic-gate 
1245*7c478bd9Sstevel@tonic-gate 	/* Copy in data into the data buffer */
1246*7c478bd9Sstevel@tonic-gate 	ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
1247*7c478bd9Sstevel@tonic-gate 	    (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);
1248*7c478bd9Sstevel@tonic-gate 
1249*7c478bd9Sstevel@tonic-gate 	/* Update the current pointer, offset, and reserved size */
1250*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current += datasize;
1251*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1252*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_ptrs.qp_begin);
1253*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_resv_size -= datasize;
1254*7c478bd9Sstevel@tonic-gate 
1255*7c478bd9Sstevel@tonic-gate 	/* save away cleanup info when we are done with the command */
1256*7c478bd9Sstevel@tonic-gate 	cmd->qc_data_used = B_TRUE;
1257*7c478bd9Sstevel@tonic-gate 	cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
1258*7c478bd9Sstevel@tonic-gate 	cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;
1259*7c478bd9Sstevel@tonic-gate 
1260*7c478bd9Sstevel@tonic-gate 	/* Sync data buffer */
1261*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1262*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1263*7c478bd9Sstevel@tonic-gate 
1264*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_exit, HCI1394_TNF_HAL_STACK,
1265*7c478bd9Sstevel@tonic-gate 	    "");
1266*7c478bd9Sstevel@tonic-gate }
1267*7c478bd9Sstevel@tonic-gate 
1268*7c478bd9Sstevel@tonic-gate 
1269*7c478bd9Sstevel@tonic-gate /*
1270*7c478bd9Sstevel@tonic-gate  * hci1394_q_at_copy_from_mblk()
1271*7c478bd9Sstevel@tonic-gate  *    Copy a byte stream from one or more mblks to an IO-mapped data buffer
1272*7c478bd9Sstevel@tonic-gate  *    (qbuf).  Copy mblk->length bytes.  The Services Layer and the HAL use a
1273*7c478bd9Sstevel@tonic-gate  *    private structure (h1394_mblk_t) to keep track of how much of the mblk to
1274*7c478bd9Sstevel@tonic-gate  *    send, since we may have to break the transfer up into smaller blocks
1275*7c478bd9Sstevel@tonic-gate  *    (e.g. a 1MByte block write would go out in 2KByte chunks).  State
1276*7c478bd9Sstevel@tonic-gate  *    information for the data buffer is kept in cmd.
1277*7c478bd9Sstevel@tonic-gate  */
1278*7c478bd9Sstevel@tonic-gate static void
1279*7c478bd9Sstevel@tonic-gate hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1280*7c478bd9Sstevel@tonic-gate     h1394_mblk_t *mblk)
1281*7c478bd9Sstevel@tonic-gate {
1282*7c478bd9Sstevel@tonic-gate 	uint_t bytes_left;
1283*7c478bd9Sstevel@tonic-gate 	uint_t length;
1284*7c478bd9Sstevel@tonic-gate 
1285*7c478bd9Sstevel@tonic-gate 
1286*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
1287*7c478bd9Sstevel@tonic-gate 	ASSERT(cmd != NULL);
1288*7c478bd9Sstevel@tonic-gate 	ASSERT(mblk != NULL);
1289*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_enter,
1290*7c478bd9Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK, "");
1291*7c478bd9Sstevel@tonic-gate 
1292*7c478bd9Sstevel@tonic-gate 	/* We return these variables to the Services Layer when we are done */
1293*7c478bd9Sstevel@tonic-gate 	mblk->next_offset = mblk->curr_offset;
1294*7c478bd9Sstevel@tonic-gate 	mblk->next_mblk = mblk->curr_mblk;
1295*7c478bd9Sstevel@tonic-gate 	bytes_left = mblk->length;
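	/*
	 * next_mblk/next_offset act as a resume cursor: once this chunk has
	 * been copied they tell the Services Layer where the next chunk of a
	 * larger transfer should pick up.
	 */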
1296*7c478bd9Sstevel@tonic-gate 
1297*7c478bd9Sstevel@tonic-gate 	/* do while there are bytes left to copy */
1298*7c478bd9Sstevel@tonic-gate 	do {
1299*7c478bd9Sstevel@tonic-gate 		/*
1300*7c478bd9Sstevel@tonic-gate 		 * If the entire data portion of the current block transfer is
1301*7c478bd9Sstevel@tonic-gate 		 * The entire data portion of the current block transfer is
1302*7c478bd9Sstevel@tonic-gate 		 */
1303*7c478bd9Sstevel@tonic-gate 		if ((mblk->next_offset + bytes_left) <=
1304*7c478bd9Sstevel@tonic-gate 		    (mblk->next_mblk->b_wptr)) {
1305*7c478bd9Sstevel@tonic-gate 			/* Copy the data into the data Q */
1306*7c478bd9Sstevel@tonic-gate 			hci1394_q_at_rep_put8(qbuf, cmd,
1307*7c478bd9Sstevel@tonic-gate 			    (uint8_t *)mblk->next_offset, bytes_left);
1308*7c478bd9Sstevel@tonic-gate 
1309*7c478bd9Sstevel@tonic-gate 			/* increment the mblk offset */
1310*7c478bd9Sstevel@tonic-gate 			mblk->next_offset += bytes_left;
1311*7c478bd9Sstevel@tonic-gate 
1312*7c478bd9Sstevel@tonic-gate 			/* we have no more bytes to put into the buffer */
1313*7c478bd9Sstevel@tonic-gate 			bytes_left = 0;
1314*7c478bd9Sstevel@tonic-gate 
1315*7c478bd9Sstevel@tonic-gate 			/*
1316*7c478bd9Sstevel@tonic-gate 			 * If our offset is at the end of data in this mblk, go
1317*7c478bd9Sstevel@tonic-gate 			 * to the next mblk.
1318*7c478bd9Sstevel@tonic-gate 			 */
1319*7c478bd9Sstevel@tonic-gate 			if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
1320*7c478bd9Sstevel@tonic-gate 				mblk->next_mblk = mblk->next_mblk->b_cont;
1321*7c478bd9Sstevel@tonic-gate 				if (mblk->next_mblk != NULL) {
1322*7c478bd9Sstevel@tonic-gate 					mblk->next_offset =
1323*7c478bd9Sstevel@tonic-gate 					    mblk->next_mblk->b_rptr;
1324*7c478bd9Sstevel@tonic-gate 				}
1325*7c478bd9Sstevel@tonic-gate 			}
1326*7c478bd9Sstevel@tonic-gate 
1327*7c478bd9Sstevel@tonic-gate 		/*
1328*7c478bd9Sstevel@tonic-gate 		 * The data portion of the current block transfer is spread
1329*7c478bd9Sstevel@tonic-gate 		 * across two or more mblks.
1330*7c478bd9Sstevel@tonic-gate 		 */
1331*7c478bd9Sstevel@tonic-gate 		} else {
1332*7c478bd9Sstevel@tonic-gate 			/*
1333*7c478bd9Sstevel@tonic-gate 			 * Figure out how much data is in this mblk.
1334*7c478bd9Sstevel@tonic-gate 			 */
1335*7c478bd9Sstevel@tonic-gate 			length = mblk->next_mblk->b_wptr - mblk->next_offset;
1336*7c478bd9Sstevel@tonic-gate 
1337*7c478bd9Sstevel@tonic-gate 			/* Copy the data into the atreq data Q */
1338*7c478bd9Sstevel@tonic-gate 			hci1394_q_at_rep_put8(qbuf, cmd,
1339*7c478bd9Sstevel@tonic-gate 			    (uint8_t *)mblk->next_offset, length);
1340*7c478bd9Sstevel@tonic-gate 
1341*7c478bd9Sstevel@tonic-gate 			/* update the bytes left count, go to the next mblk */
1342*7c478bd9Sstevel@tonic-gate 			bytes_left = bytes_left - length;
1343*7c478bd9Sstevel@tonic-gate 			mblk->next_mblk = mblk->next_mblk->b_cont;
1344*7c478bd9Sstevel@tonic-gate 			ASSERT(mblk->next_mblk != NULL);
1345*7c478bd9Sstevel@tonic-gate 			mblk->next_offset = mblk->next_mblk->b_rptr;
1346*7c478bd9Sstevel@tonic-gate 		}
1347*7c478bd9Sstevel@tonic-gate 	} while (bytes_left > 0);
1348*7c478bd9Sstevel@tonic-gate 
1349*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_exit,
1350*7c478bd9Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK, "");
1351*7c478bd9Sstevel@tonic-gate }
1352*7c478bd9Sstevel@tonic-gate 
1353*7c478bd9Sstevel@tonic-gate 
1354*7c478bd9Sstevel@tonic-gate /*
1355*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_next()
1356*7c478bd9Sstevel@tonic-gate  *    Return the address of the next received AR packet.  If there are no more
1357*7c478bd9Sstevel@tonic-gate  *    AR packets in the buffer, q_addr will be set to NULL.
1358*7c478bd9Sstevel@tonic-gate  */
1359*7c478bd9Sstevel@tonic-gate void
1360*7c478bd9Sstevel@tonic-gate hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
1361*7c478bd9Sstevel@tonic-gate {
1362*7c478bd9Sstevel@tonic-gate 	hci1394_desc_t *desc;
1363*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *descb;
1364*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *datab;
1365*7c478bd9Sstevel@tonic-gate 	uint32_t residual_count;
1366*7c478bd9Sstevel@tonic-gate 
1367*7c478bd9Sstevel@tonic-gate 
1368*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1369*7c478bd9Sstevel@tonic-gate 	ASSERT(q_addr != NULL);
1370*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_next_enter, HCI1394_TNF_HAL_STACK, "");
1371*7c478bd9Sstevel@tonic-gate 
1372*7c478bd9Sstevel@tonic-gate 	descb = &q_handle->q_desc;
1373*7c478bd9Sstevel@tonic-gate 	datab = &q_handle->q_data;
1374*7c478bd9Sstevel@tonic-gate 
1375*7c478bd9Sstevel@tonic-gate 	/* Sync Descriptor buffer */
1376*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
1377*7c478bd9Sstevel@tonic-gate 	    descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1378*7c478bd9Sstevel@tonic-gate 
1379*7c478bd9Sstevel@tonic-gate 	/*
1380*7c478bd9Sstevel@tonic-gate 	 * Check the residual count in the current IM descriptor against
1381*7c478bd9Sstevel@tonic-gate 	 * q_space_left to see if we have received any more packets.
1382*7c478bd9Sstevel@tonic-gate 	 */
1383*7c478bd9Sstevel@tonic-gate 	desc = (hci1394_desc_t *)q_handle->q_head;
1384*7c478bd9Sstevel@tonic-gate 	residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
1385*7c478bd9Sstevel@tonic-gate 	residual_count &= DESC_ST_RESCOUNT_MASK;
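	/*
	 * The HW counts resCount down as it deposits packets into the IM data
	 * buffer.  If it still reports at least as much free space as we
	 * think is left, nothing new has arrived since the last check.
	 */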
1386*7c478bd9Sstevel@tonic-gate 	if (residual_count >= q_handle->q_space_left) {
1387*7c478bd9Sstevel@tonic-gate 		/* No new packets received */
1388*7c478bd9Sstevel@tonic-gate 		*q_addr = NULL;
1389*7c478bd9Sstevel@tonic-gate 		TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit,
1390*7c478bd9Sstevel@tonic-gate 		    HCI1394_TNF_HAL_STACK, "");
1391*7c478bd9Sstevel@tonic-gate 		return;
1392*7c478bd9Sstevel@tonic-gate 	}
1393*7c478bd9Sstevel@tonic-gate 
1394*7c478bd9Sstevel@tonic-gate 	/* Sync Data Q */
1395*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
1396*7c478bd9Sstevel@tonic-gate 	    datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1397*7c478bd9Sstevel@tonic-gate 
1398*7c478bd9Sstevel@tonic-gate 	/*
1399*7c478bd9Sstevel@tonic-gate 	 * We have a new packet, return the address of the start of the
1400*7c478bd9Sstevel@tonic-gate 	 * packet.
1401*7c478bd9Sstevel@tonic-gate 	 */
1402*7c478bd9Sstevel@tonic-gate 	*q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
1403*7c478bd9Sstevel@tonic-gate 
1404*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit, HCI1394_TNF_HAL_STACK, "");
1405*7c478bd9Sstevel@tonic-gate }
1406*7c478bd9Sstevel@tonic-gate 
1407*7c478bd9Sstevel@tonic-gate 
1408*7c478bd9Sstevel@tonic-gate /*
1409*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_free()
1410*7c478bd9Sstevel@tonic-gate  *    Free the space used by the AR packet at the top of the data buffer. AR
1411*7c478bd9Sstevel@tonic-gate  *    packets are processed in the order that they are received.  This will
1412*7c478bd9Sstevel@tonic-gate  *    free the oldest received packet which has not yet been freed.  size is
1413*7c478bd9Sstevel@tonic-gate  *    how much space the packet takes up.
1414*7c478bd9Sstevel@tonic-gate  */
1415*7c478bd9Sstevel@tonic-gate void
1416*7c478bd9Sstevel@tonic-gate hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
1417*7c478bd9Sstevel@tonic-gate {
1418*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *descb;
1419*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *datab;
1420*7c478bd9Sstevel@tonic-gate 
1421*7c478bd9Sstevel@tonic-gate 
1422*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1423*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_free_enter, HCI1394_TNF_HAL_STACK, "");
1424*7c478bd9Sstevel@tonic-gate 
1425*7c478bd9Sstevel@tonic-gate 	descb = &q_handle->q_desc;
1426*7c478bd9Sstevel@tonic-gate 	datab = &q_handle->q_data;
1427*7c478bd9Sstevel@tonic-gate 
1428*7c478bd9Sstevel@tonic-gate 	/*
1429*7c478bd9Sstevel@tonic-gate 	 * The packet spans multiple buffers.  Theoretically a packet could be
1430*7c478bd9Sstevel@tonic-gate 	 * spread across more than two buffers for an ARRESP.  Since the
1431*7c478bd9Sstevel@tonic-gate 	 * buffers should be in at least 4K increments and the max packet size
1432*7c478bd9Sstevel@tonic-gate 	 * is 2KBytes, this will not happen.
1433*7c478bd9Sstevel@tonic-gate 	 */
1434*7c478bd9Sstevel@tonic-gate 	if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
1435*7c478bd9Sstevel@tonic-gate 		/* Add IM descriptor for used buffer back into Q */
1436*7c478bd9Sstevel@tonic-gate 		hci1394_q_ar_write_IM(q_handle, descb,
1437*7c478bd9Sstevel@tonic-gate 		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf
1438*7c478bd9Sstevel@tonic-gate 		    ].dmac_address,
1439*7c478bd9Sstevel@tonic-gate 		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);
1440*7c478bd9Sstevel@tonic-gate 
1441*7c478bd9Sstevel@tonic-gate 		/* Go to the next buffer */
1442*7c478bd9Sstevel@tonic-gate 		hci1394_q_next_buf(datab);
1443*7c478bd9Sstevel@tonic-gate 
1444*7c478bd9Sstevel@tonic-gate 		/* Update the next buffer's pointers for the partial packet */
1445*7c478bd9Sstevel@tonic-gate 		size -= q_handle->q_space_left;
1446*7c478bd9Sstevel@tonic-gate 		datab->qb_ptrs.qp_current += size;
1447*7c478bd9Sstevel@tonic-gate 		q_handle->q_space_left =
1448*7c478bd9Sstevel@tonic-gate 		    datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
1449*7c478bd9Sstevel@tonic-gate 		    size;
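		/*
		 * Hypothetical example: with 4KByte data buffers and 1KByte
		 * left in the old buffer, a 2KByte packet leaves
		 * size - q_space_left = 1KByte in the new buffer; qp_current
		 * then advances 1KByte past the start of that buffer
		 * (hci1394_q_next_buf() is assumed to have reset it to the
		 * top) and q_space_left becomes dmac_size - 1KByte.
		 */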
1450*7c478bd9Sstevel@tonic-gate 
1451*7c478bd9Sstevel@tonic-gate 		/* Change the head pointer to the next IM descriptor */
1452*7c478bd9Sstevel@tonic-gate 		q_handle->q_head += sizeof (hci1394_desc_t);
1453*7c478bd9Sstevel@tonic-gate 		if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
1454*7c478bd9Sstevel@tonic-gate 		    (descb->qb_ptrs.qp_bottom + 1)) {
1455*7c478bd9Sstevel@tonic-gate 			q_handle->q_head = descb->qb_ptrs.qp_top;
1456*7c478bd9Sstevel@tonic-gate 		}
1457*7c478bd9Sstevel@tonic-gate 
1458*7c478bd9Sstevel@tonic-gate 	/* Packet is only in one buffer */
1459*7c478bd9Sstevel@tonic-gate 	} else {
1460*7c478bd9Sstevel@tonic-gate 		q_handle->q_space_left -= size;
1461*7c478bd9Sstevel@tonic-gate 		datab->qb_ptrs.qp_current += size;
1462*7c478bd9Sstevel@tonic-gate 	}
1463*7c478bd9Sstevel@tonic-gate 
1464*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_free_exit, HCI1394_TNF_HAL_STACK, "");
1465*7c478bd9Sstevel@tonic-gate }
1466*7c478bd9Sstevel@tonic-gate 
1467*7c478bd9Sstevel@tonic-gate 
1468*7c478bd9Sstevel@tonic-gate /*
1469*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_get32()
1470*7c478bd9Sstevel@tonic-gate  *    Read a quadlet of data regardless of whether it is in the current buffer
1471*7c478bd9Sstevel@tonic-gate  *    or has wrapped to the top buffer.  If the address passed to this routine
1472*7c478bd9Sstevel@tonic-gate  *    is past the bottom of the data buffer, this routine will automatically
1473*7c478bd9Sstevel@tonic-gate  *    wrap back to the top of the Q and read from the correct offset from the
1474*7c478bd9Sstevel@tonic-gate  *    top.  The quadlet that was read is returned to the caller.
1475*7c478bd9Sstevel@tonic-gate  */
1476*7c478bd9Sstevel@tonic-gate uint32_t
1477*7c478bd9Sstevel@tonic-gate hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
1478*7c478bd9Sstevel@tonic-gate {
1479*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *data;
1480*7c478bd9Sstevel@tonic-gate 	uintptr_t new_addr;
1481*7c478bd9Sstevel@tonic-gate 	uint32_t data32;
1482*7c478bd9Sstevel@tonic-gate 
1483*7c478bd9Sstevel@tonic-gate 
1484*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1485*7c478bd9Sstevel@tonic-gate 	ASSERT(addr != NULL);
1486*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_get32_enter, HCI1394_TNF_HAL_STACK, "");
1487*7c478bd9Sstevel@tonic-gate 
1488*7c478bd9Sstevel@tonic-gate 	data = &q_handle->q_data;
1489*7c478bd9Sstevel@tonic-gate 
1490*7c478bd9Sstevel@tonic-gate 	/*
1491*7c478bd9Sstevel@tonic-gate 	 * if the data has wrapped to the top of the buffer, adjust the address.
1492*7c478bd9Sstevel@tonic-gate 	 */
1493*7c478bd9Sstevel@tonic-gate 	if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1494*7c478bd9Sstevel@tonic-gate 		new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
1495*7c478bd9Sstevel@tonic-gate 		    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
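		/*
		 * i.e. the number of bytes by which addr extends past the
		 * bottom of the buffer becomes the offset from the top.
		 */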
1496*7c478bd9Sstevel@tonic-gate 		data32 = ddi_get32(data->qb_buf.bi_handle,
1497*7c478bd9Sstevel@tonic-gate 		    (uint32_t *)new_addr);
1498*7c478bd9Sstevel@tonic-gate 
1499*7c478bd9Sstevel@tonic-gate 	/* data is before end of buffer */
1500*7c478bd9Sstevel@tonic-gate 	} else {
1501*7c478bd9Sstevel@tonic-gate 		data32 = ddi_get32(data->qb_buf.bi_handle, addr);
1502*7c478bd9Sstevel@tonic-gate 	}
1503*7c478bd9Sstevel@tonic-gate 
1504*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_get32_exit, HCI1394_TNF_HAL_STACK, "");
1505*7c478bd9Sstevel@tonic-gate 
1506*7c478bd9Sstevel@tonic-gate 	return (data32);
1507*7c478bd9Sstevel@tonic-gate }
1508*7c478bd9Sstevel@tonic-gate 
1509*7c478bd9Sstevel@tonic-gate 
1510*7c478bd9Sstevel@tonic-gate /*
1511*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_rep_get8()
1512*7c478bd9Sstevel@tonic-gate  *    Read a byte stream of data regardless of whether it is contiguous or has
1513*7c478bd9Sstevel@tonic-gate  *    partially or fully wrapped to the top buffer.  If the address passed to
1514*7c478bd9Sstevel@tonic-gate  *    this routine is past the bottom of the data buffer, or if address + size
1515*7c478bd9Sstevel@tonic-gate  *    runs past the bottom of the data buffer, this routine will automatically
1516*7c478bd9Sstevel@tonic-gate  *    wrap back to the top of the Q and read from the correct offset from the
1517*7c478bd9Sstevel@tonic-gate  *    top.  Copy the data into the kernel virtual address provided.
1518*7c478bd9Sstevel@tonic-gate  */
1519*7c478bd9Sstevel@tonic-gate void
1520*7c478bd9Sstevel@tonic-gate hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
1521*7c478bd9Sstevel@tonic-gate     uint8_t *q_addr, uint_t size)
1522*7c478bd9Sstevel@tonic-gate {
1523*7c478bd9Sstevel@tonic-gate 	hci1394_q_buf_t *data;
1524*7c478bd9Sstevel@tonic-gate 	uintptr_t new_addr;
1525*7c478bd9Sstevel@tonic-gate 	uint_t new_size;
1526*7c478bd9Sstevel@tonic-gate 	uintptr_t new_dest;
1527*7c478bd9Sstevel@tonic-gate 
1528*7c478bd9Sstevel@tonic-gate 
1529*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1530*7c478bd9Sstevel@tonic-gate 	ASSERT(dest != NULL);
1531*7c478bd9Sstevel@tonic-gate 	ASSERT(q_addr != NULL);
1532*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_enter, HCI1394_TNF_HAL_STACK,
1533*7c478bd9Sstevel@tonic-gate 	    "");
1534*7c478bd9Sstevel@tonic-gate 
1535*7c478bd9Sstevel@tonic-gate 	data = &q_handle->q_data;
1536*7c478bd9Sstevel@tonic-gate 
1537*7c478bd9Sstevel@tonic-gate 	/*
1538*7c478bd9Sstevel@tonic-gate 	 * There are three cases:
1539*7c478bd9Sstevel@tonic-gate 	 *   1) All of the data has wrapped.
1540*7c478bd9Sstevel@tonic-gate 	 *   2) Some of the data has not wrapped and some has wrapped.
1541*7c478bd9Sstevel@tonic-gate 	 *   3) None of the data has wrapped.
1542*7c478bd9Sstevel@tonic-gate 	 */
1543*7c478bd9Sstevel@tonic-gate 
1544*7c478bd9Sstevel@tonic-gate 	/* All of the data has wrapped, just adjust the starting address */
1545*7c478bd9Sstevel@tonic-gate 	if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1546*7c478bd9Sstevel@tonic-gate 		new_addr = (uintptr_t)data->qb_ptrs.qp_top +
1547*7c478bd9Sstevel@tonic-gate 		    ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
1548*7c478bd9Sstevel@tonic-gate 		    (uintptr_t)1));
1549*7c478bd9Sstevel@tonic-gate 		ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
1550*7c478bd9Sstevel@tonic-gate 		    size, DDI_DEV_AUTOINCR);
1551*7c478bd9Sstevel@tonic-gate 
1552*7c478bd9Sstevel@tonic-gate 	/*
1553*7c478bd9Sstevel@tonic-gate 	 * Some of the data has wrapped. Copy the data that hasn't wrapped,
1554*7c478bd9Sstevel@tonic-gate 	 * adjust the address, then copy the rest.
1555*7c478bd9Sstevel@tonic-gate 	 */
1556*7c478bd9Sstevel@tonic-gate 	} else if (((uintptr_t)q_addr + (uintptr_t)size) >
1557*7c478bd9Sstevel@tonic-gate 	    ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
1558*7c478bd9Sstevel@tonic-gate 		/* Copy first half */
1559*7c478bd9Sstevel@tonic-gate 		new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
1560*7c478bd9Sstevel@tonic-gate 		    (uintptr_t)1) - (uintptr_t)q_addr);
1561*7c478bd9Sstevel@tonic-gate 		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
1562*7c478bd9Sstevel@tonic-gate 		    DDI_DEV_AUTOINCR);
1563*7c478bd9Sstevel@tonic-gate 
1564*7c478bd9Sstevel@tonic-gate 		/* copy second half */
1565*7c478bd9Sstevel@tonic-gate 		new_dest = (uintptr_t)dest + (uintptr_t)new_size;
1566*7c478bd9Sstevel@tonic-gate 		new_size = size - new_size;
1567*7c478bd9Sstevel@tonic-gate 		new_addr = (uintptr_t)data->qb_ptrs.qp_top;
1568*7c478bd9Sstevel@tonic-gate 		ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
1569*7c478bd9Sstevel@tonic-gate 		    (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);
1570*7c478bd9Sstevel@tonic-gate 
1571*7c478bd9Sstevel@tonic-gate 	/* None of the data has wrapped */
1572*7c478bd9Sstevel@tonic-gate 	} else {
1573*7c478bd9Sstevel@tonic-gate 		ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
1574*7c478bd9Sstevel@tonic-gate 		    DDI_DEV_AUTOINCR);
1575*7c478bd9Sstevel@tonic-gate 	}
1576*7c478bd9Sstevel@tonic-gate 
1577*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_exit, HCI1394_TNF_HAL_STACK,
1578*7c478bd9Sstevel@tonic-gate 	    "");
1579*7c478bd9Sstevel@tonic-gate }
1580*7c478bd9Sstevel@tonic-gate 
1581*7c478bd9Sstevel@tonic-gate 
1582*7c478bd9Sstevel@tonic-gate /*
1583*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_copy_to_mblk()
1584*7c478bd9Sstevel@tonic-gate  *    Read a byte stream of data regardless of whether it is contiguous or has
1585*7c478bd9Sstevel@tonic-gate  *    partially or fully wrapped to the top buffer.  If the address passed to
1586*7c478bd9Sstevel@tonic-gate  *    this routine is past the bottom of the data buffer, or if address + size
1587*7c478bd9Sstevel@tonic-gate  *    runs past the bottom of the data buffer, this routine will automatically
1588*7c478bd9Sstevel@tonic-gate  *    wrap back to the top of the Q and read from the correct offset from the
1589*7c478bd9Sstevel@tonic-gate  *    top.  Copy the data into the mblk provided.  The Services Layer and the
1590*7c478bd9Sstevel@tonic-gate  *    HAL use a private structure (h1394_mblk_t) to keep track of how much of
1591*7c478bd9Sstevel@tonic-gate  *    the mblk to receive into, since we may have to break the transfer up into
1592*7c478bd9Sstevel@tonic-gate  *    smaller blocks (e.g. a 1MByte block read would go out as 2KByte requests).
1593*7c478bd9Sstevel@tonic-gate  */
1594*7c478bd9Sstevel@tonic-gate void
1595*7c478bd9Sstevel@tonic-gate hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
1596*7c478bd9Sstevel@tonic-gate     h1394_mblk_t *mblk)
1597*7c478bd9Sstevel@tonic-gate {
1598*7c478bd9Sstevel@tonic-gate 	uint8_t *new_addr;
1599*7c478bd9Sstevel@tonic-gate 	uint_t bytes_left;
1600*7c478bd9Sstevel@tonic-gate 	uint_t length;
1601*7c478bd9Sstevel@tonic-gate 
1602*7c478bd9Sstevel@tonic-gate 
1603*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1604*7c478bd9Sstevel@tonic-gate 	ASSERT(addr != NULL);
1605*7c478bd9Sstevel@tonic-gate 	ASSERT(mblk != NULL);
1606*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_enter,
1607*7c478bd9Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK, "");
1608*7c478bd9Sstevel@tonic-gate 
1609*7c478bd9Sstevel@tonic-gate 	/* We return these variables to the Services Layer when we are done */
1610*7c478bd9Sstevel@tonic-gate 	mblk->next_offset = mblk->curr_offset;
1611*7c478bd9Sstevel@tonic-gate 	mblk->next_mblk = mblk->curr_mblk;
1612*7c478bd9Sstevel@tonic-gate 	bytes_left = mblk->length;
1613*7c478bd9Sstevel@tonic-gate 
1614*7c478bd9Sstevel@tonic-gate 	/* the address we copy from will change as we change mblks */
1615*7c478bd9Sstevel@tonic-gate 	new_addr = addr;
1616*7c478bd9Sstevel@tonic-gate 
1617*7c478bd9Sstevel@tonic-gate 	/* do while there are bytes left to copy */
1618*7c478bd9Sstevel@tonic-gate 	do {
1619*7c478bd9Sstevel@tonic-gate 		/*
1620*7c478bd9Sstevel@tonic-gate 		 * If the entire data portion of the current block transfer is
1621*7c478bd9Sstevel@tonic-gate 		 * The entire data portion of the current block transfer is
1622*7c478bd9Sstevel@tonic-gate 		 */
1623*7c478bd9Sstevel@tonic-gate 		if ((mblk->next_offset + bytes_left) <=
1624*7c478bd9Sstevel@tonic-gate 		    (mblk->next_mblk->b_datap->db_lim)) {
1625*7c478bd9Sstevel@tonic-gate 			/* Copy the data into the mblk */
1626*7c478bd9Sstevel@tonic-gate 			hci1394_q_ar_rep_get8(q_handle,
1627*7c478bd9Sstevel@tonic-gate 			    (uint8_t *)mblk->next_offset, new_addr, bytes_left);
1628*7c478bd9Sstevel@tonic-gate 
1629*7c478bd9Sstevel@tonic-gate 			/* advance the offset and the mblk write pointer */
1630*7c478bd9Sstevel@tonic-gate 			mblk->next_offset += bytes_left;
1631*7c478bd9Sstevel@tonic-gate 			mblk->next_mblk->b_wptr = mblk->next_offset;
1632*7c478bd9Sstevel@tonic-gate 
1633*7c478bd9Sstevel@tonic-gate 			/* we have no more bytes to put into the buffer */
1634*7c478bd9Sstevel@tonic-gate 			bytes_left = 0;
1635*7c478bd9Sstevel@tonic-gate 
1636*7c478bd9Sstevel@tonic-gate 			/*
1637*7c478bd9Sstevel@tonic-gate 			 * If our offset is at the end of data in this mblk, go
1638*7c478bd9Sstevel@tonic-gate 			 * to the next mblk.
1639*7c478bd9Sstevel@tonic-gate 			 */
1640*7c478bd9Sstevel@tonic-gate 			if (mblk->next_offset >=
1641*7c478bd9Sstevel@tonic-gate 			    mblk->next_mblk->b_datap->db_lim) {
1642*7c478bd9Sstevel@tonic-gate 				mblk->next_mblk = mblk->next_mblk->b_cont;
1643*7c478bd9Sstevel@tonic-gate 				if (mblk->next_mblk != NULL) {
1644*7c478bd9Sstevel@tonic-gate 					mblk->next_offset =
1645*7c478bd9Sstevel@tonic-gate 					    mblk->next_mblk->b_wptr;
1646*7c478bd9Sstevel@tonic-gate 				}
1647*7c478bd9Sstevel@tonic-gate 			}
1648*7c478bd9Sstevel@tonic-gate 
1649*7c478bd9Sstevel@tonic-gate 		/*
1650*7c478bd9Sstevel@tonic-gate 		 * The data portion of the current block transfer is spread
1651*7c478bd9Sstevel@tonic-gate 		 * across two or more mblks.
1652*7c478bd9Sstevel@tonic-gate 		 */
1653*7c478bd9Sstevel@tonic-gate 		} else {
1654*7c478bd9Sstevel@tonic-gate 			/* Figure out how much room is left in this mblk */
1655*7c478bd9Sstevel@tonic-gate 			length = mblk->next_mblk->b_datap->db_lim -
1656*7c478bd9Sstevel@tonic-gate 			    mblk->next_offset;
1657*7c478bd9Sstevel@tonic-gate 
1658*7c478bd9Sstevel@tonic-gate 			/* Copy the data into the mblk */
1659*7c478bd9Sstevel@tonic-gate 			hci1394_q_ar_rep_get8(q_handle,
1660*7c478bd9Sstevel@tonic-gate 			    (uint8_t *)mblk->next_offset, new_addr, length);
1661*7c478bd9Sstevel@tonic-gate 			mblk->next_mblk->b_wptr =
1662*7c478bd9Sstevel@tonic-gate 			    mblk->next_mblk->b_datap->db_lim;
1663*7c478bd9Sstevel@tonic-gate 
1664*7c478bd9Sstevel@tonic-gate 			/*
1665*7c478bd9Sstevel@tonic-gate 			 * update the bytes left and address to copy from, go
1666*7c478bd9Sstevel@tonic-gate 			 * to the next mblk.
1667*7c478bd9Sstevel@tonic-gate 			 */
1668*7c478bd9Sstevel@tonic-gate 			bytes_left = bytes_left - length;
1669*7c478bd9Sstevel@tonic-gate 			new_addr = (uint8_t *)((uintptr_t)new_addr +
1670*7c478bd9Sstevel@tonic-gate 			    (uintptr_t)length);
1671*7c478bd9Sstevel@tonic-gate 			mblk->next_mblk = mblk->next_mblk->b_cont;
1672*7c478bd9Sstevel@tonic-gate 			ASSERT(mblk->next_mblk != NULL);
1673*7c478bd9Sstevel@tonic-gate 			mblk->next_offset = mblk->next_mblk->b_wptr;
1674*7c478bd9Sstevel@tonic-gate 		}
1675*7c478bd9Sstevel@tonic-gate 	} while (bytes_left > 0);
1676*7c478bd9Sstevel@tonic-gate 
1677*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_exit,
1678*7c478bd9Sstevel@tonic-gate 	    HCI1394_TNF_HAL_STACK, "");
1679*7c478bd9Sstevel@tonic-gate }
1680*7c478bd9Sstevel@tonic-gate 
1681*7c478bd9Sstevel@tonic-gate 
1682*7c478bd9Sstevel@tonic-gate /*
1683*7c478bd9Sstevel@tonic-gate  * hci1394_q_ar_write_IM()
1684*7c478bd9Sstevel@tonic-gate  *    Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
1685*7c478bd9Sstevel@tonic-gate  *    The IO address of the data buffer is passed in io_addr.  datasize is the
1686*7c478bd9Sstevel@tonic-gate  *    size of the data buffer to receive into.
1687*7c478bd9Sstevel@tonic-gate  */
1688*7c478bd9Sstevel@tonic-gate void
1689*7c478bd9Sstevel@tonic-gate hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1690*7c478bd9Sstevel@tonic-gate     uint32_t io_addr, uint_t datasize)
1691*7c478bd9Sstevel@tonic-gate {
1692*7c478bd9Sstevel@tonic-gate 	hci1394_desc_t *desc;
1693*7c478bd9Sstevel@tonic-gate 	uint32_t data;
1694*7c478bd9Sstevel@tonic-gate 	uint32_t command_ptr;
1695*7c478bd9Sstevel@tonic-gate 
1696*7c478bd9Sstevel@tonic-gate 
1697*7c478bd9Sstevel@tonic-gate 	ASSERT(q_handle != NULL);
1698*7c478bd9Sstevel@tonic-gate 	ASSERT(qbuf != NULL);
1699*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_enter, HCI1394_TNF_HAL_STACK,
1700*7c478bd9Sstevel@tonic-gate 	    "");
1701*7c478bd9Sstevel@tonic-gate 
1702*7c478bd9Sstevel@tonic-gate 	/* Make sure enough room for IM */
1703*7c478bd9Sstevel@tonic-gate 	if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
1704*7c478bd9Sstevel@tonic-gate 	    (qbuf->qb_ptrs.qp_bottom + 1)) {
1705*7c478bd9Sstevel@tonic-gate 		hci1394_q_next_buf(qbuf);
1706*7c478bd9Sstevel@tonic-gate 	} else {
1707*7c478bd9Sstevel@tonic-gate 		/* Store the offset of the top of this descriptor block */
1708*7c478bd9Sstevel@tonic-gate 		qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1709*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_ptrs.qp_begin);
1710*7c478bd9Sstevel@tonic-gate 	}
1711*7c478bd9Sstevel@tonic-gate 
1712*7c478bd9Sstevel@tonic-gate 	/* Setup OpenHCI IM Header */
1713*7c478bd9Sstevel@tonic-gate 	desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1714*7c478bd9Sstevel@tonic-gate 	data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
1715*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1716*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1717*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1718*7c478bd9Sstevel@tonic-gate 	ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
1719*7c478bd9Sstevel@tonic-gate 	    DESC_ST_RESCOUNT_MASK);
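	/*
	 * The status field is seeded with the full buffer size as the initial
	 * resCount; the HW counts it down as packets arrive, and
	 * hci1394_q_ar_next() compares it against q_space_left to spot newly
	 * received packets.
	 */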
1720*7c478bd9Sstevel@tonic-gate 
1721*7c478bd9Sstevel@tonic-gate 	/*
1722*7c478bd9Sstevel@tonic-gate 	 * Sync buffer in case DMA engine currently running. This must be done
1723*7c478bd9Sstevel@tonic-gate 	 * before writing the command pointer in the previous descriptor.
1724*7c478bd9Sstevel@tonic-gate 	 */
1725*7c478bd9Sstevel@tonic-gate 	(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1726*7c478bd9Sstevel@tonic-gate 	    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1727*7c478bd9Sstevel@tonic-gate 
1728*7c478bd9Sstevel@tonic-gate 	/*
1729*7c478bd9Sstevel@tonic-gate 	 * Set up the command pointer.  This tells the HW where to get the
1730*7c478bd9Sstevel@tonic-gate 	 * descriptor we just set up.  It includes the IO address along with
1731*7c478bd9Sstevel@tonic-gate 	 * a 4-bit count of 16-byte blocks.  We only wrote one 16-byte block.
1732*7c478bd9Sstevel@tonic-gate 	 */
1733*7c478bd9Sstevel@tonic-gate 	command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1734*7c478bd9Sstevel@tonic-gate 	    ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);
1735*7c478bd9Sstevel@tonic-gate 
1736*7c478bd9Sstevel@tonic-gate 	/*
1737*7c478bd9Sstevel@tonic-gate 	 * if we previously set up a descriptor, add this new descriptor into
1738*7c478bd9Sstevel@tonic-gate 	 * the previous descriptor's "next" pointer.
1739*7c478bd9Sstevel@tonic-gate 	 */
1740*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_previous != NULL) {
1741*7c478bd9Sstevel@tonic-gate 		ddi_put32(qbuf->qb_buf.bi_handle,
1742*7c478bd9Sstevel@tonic-gate 		    &q_handle->q_previous->branch, command_ptr);
1743*7c478bd9Sstevel@tonic-gate 		/* Sync buffer again, this gets the command pointer */
1744*7c478bd9Sstevel@tonic-gate 		(void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1745*7c478bd9Sstevel@tonic-gate 		    qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1746*7c478bd9Sstevel@tonic-gate 	}
1747*7c478bd9Sstevel@tonic-gate 
1748*7c478bd9Sstevel@tonic-gate 	/* this is the new previous descriptor.  Update the current pointer */
1749*7c478bd9Sstevel@tonic-gate 	q_handle->q_previous = desc;
1750*7c478bd9Sstevel@tonic-gate 	qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1751*7c478bd9Sstevel@tonic-gate 
1752*7c478bd9Sstevel@tonic-gate 	/* If the DMA is not running, start it */
1753*7c478bd9Sstevel@tonic-gate 	if (q_handle->q_dma_running == B_FALSE) {
1754*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1755*7c478bd9Sstevel@tonic-gate 		    command_ptr);
1756*7c478bd9Sstevel@tonic-gate 		q_handle->q_dma_running = B_TRUE;
1757*7c478bd9Sstevel@tonic-gate 	/* the DMA is running, wake it up */
1758*7c478bd9Sstevel@tonic-gate 	} else {
1759*7c478bd9Sstevel@tonic-gate 		q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1760*7c478bd9Sstevel@tonic-gate 	}
1761*7c478bd9Sstevel@tonic-gate 
1762*7c478bd9Sstevel@tonic-gate 	TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_exit, HCI1394_TNF_HAL_STACK,
1763*7c478bd9Sstevel@tonic-gate 	    "");
1764*7c478bd9Sstevel@tonic-gate }
1765