xref: /illumos-gate/usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c (revision ffc2b7d4ae69a2eeeab283452dc5c0d70ce7519f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * hci1394_ixl_update.c
28  *    Isochronous IXL update routines.
29  *    Routines used to dynamically update a compiled and presumably running
30  *    IXL program.
31  */
32 
33 #include <sys/kmem.h>
34 #include <sys/types.h>
35 #include <sys/conf.h>
36 #include <sys/disp.h>
37 
38 #include <sys/tnf_probe.h>
39 
40 #include <sys/1394/h1394.h>
41 #include <sys/1394/ixl1394.h>	/* IXL opcodes & data structs */
42 
43 #include <sys/1394/adapters/hci1394.h>
44 
45 
46 /* local defines for hci1394_ixl_update_prepare return codes */
47 #define	IXL_PREP_READY	    1
48 #define	IXL_PREP_SUCCESS    0
49 #define	IXL_PREP_FAILURE    (-1)
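
/*
 * Return code semantics, as consumed by hci1394_ixl_update() below
 * (a summary inferred from the code in this file):
 *	IXL_PREP_READY	 evaluation ok; caller proceeds with the update
 *	IXL_PREP_SUCCESS update already complete, or nothing to do
 *	IXL_PREP_FAILURE evaluation failed; uvp->upd_status (returned to
 *			 the caller via *resultp) holds the ixl1394 error
 */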
50 
51 /*
52  * number of times update will wait for the interrupt routine to
53  * complete before giving up and failing.
54  */
55 int hci1394_upd_retries_before_fail = 50;
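
/*
 * Assumption (standard practice for patchable module globals): the
 * retry count can be tuned without recompiling, e.g. via /etc/system:
 *
 *	set hci1394:hci1394_upd_retries_before_fail = 100
 */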
56 
57 /* IXL runtime update static functions */
58 static int hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp);
59 static int hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp);
60 static int hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp);
61 static int hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp);
62 static int hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp);
63 static int hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp);
64 static int hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp);
65 static int hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp);
66 static int hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp);
67 static int hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp);
68 static int hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp);
69 static void hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp);
70 static int hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp);
71 static int hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp);
72 
73 /*
74  *	IXL commands and included fields which can be updated
75  * IXL1394_OP_CALLBACK:		callback(), callback_data
76  * IXL1394_OP_JUMP:		label
77  * IXL1394_OP_RECV_PKT		ixl_buf, size, mem_bufp
78  * IXL1394_OP_RECV_PKT_ST	ixl_buf, size, mem_bufp
79  * IXL1394_OP_RECV_BUF(ppb)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
80  * IXL1394_OP_RECV_BUF(fill)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
81  * IXL1394_OP_SEND_PKT		ixl_buf, size, mem_bufp
82  * IXL1394_OP_SEND_PKT_ST	ixl_buf, size, mem_bufp
83  * IXL1394_OP_SEND_PKT_WHDR_ST	ixl_buf, size, mem_bufp
84  * IXL1394_OP_SEND_BUF		ixl_buf, size, pkt_size, mem_bufp, buf_offset
85  * IXL1394_OP_SET_TAGSYNC	tag, sync
86  * IXL1394_OP_SET_SKIPMODE	skipmode, label
87  *
88  *	IXL commands which cannot be updated
89  * IXL1394_OP_LABEL
90  * IXL1394_OP_SEND_HDR_ONLY
91  * IXL1394_OP_SEND_NO_PKT
92  * IXL1394_OP_STORE_VALUE
93  * IXL1394_OP_STORE_TIMESTAMP
94  * IXL1394_OP_SET_SYNCWAIT
95  */
96 
97 /*
98  * hci1394_ixl_update
99  *    main entrypoint into dynamic update code: initializes temporary
100  *    update variables, evaluates request, coordinates with potentially
101  *    simultaneous run of interrupt stack, evaluates likelihood of success,
102  *    performs the update, checks if completed, performs cleanup
103  *    resulting from coordination with interrupt stack.
104  */
105 int
106 hci1394_ixl_update(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
107     ixl1394_command_t *ixlnewp, ixl1394_command_t *ixloldp,
108     uint_t riskoverride, int *resultp)
109 {
110 	hci1394_ixl_update_vars_t uv;	/* update work variables structure */
111 	int prepstatus;
112 	int ret;
113 
114 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enter,
115 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
116 
117 
118 	/* save caller specified values in update work variables structure */
119 	uv.soft_statep = soft_statep;
120 	uv.ctxtp = ctxtp;
121 	uv.ixlnewp = ixlnewp;
122 	uv.ixloldp = ixloldp;
123 	uv.risklevel = riskoverride;
124 
125 	/* initialize remainder of update work variables */
126 	uv.ixlxferp = NULL;
127 	uv.skipxferp = NULL;
128 	uv.skipmode = 0;
129 	uv.skipaddr = 0;
130 	uv.jumpaddr = 0;
131 	uv.pkthdr1 = 0;
132 	uv.pkthdr2 = 0;
133 	uv.bufaddr = 0;
134 	uv.bufsize = 0;
135 	uv.ixl_opcode = uv.ixlnewp->ixl_opcode;
136 	uv.hcihdr = 0;
137 	uv.hcistatus = 0;
138 	uv.hci_offset = 0;
139 	uv.hdr_offset = 0;
140 
141 	/* set done ok return status */
142 	uv.upd_status = 0;
143 
144 	/* evaluate request and prepare to perform update */
145 	prepstatus = hci1394_ixl_update_prepare(&uv);
146 	if (prepstatus != IXL_PREP_READY) {
147 		/*
148 		 * if done, nothing to do, or an evaluation error occurred,
149 		 * return update status
150 		 */
151 		*resultp = uv.upd_status;
152 
153 		/* if prep evaluation error, return failure */
154 		if (prepstatus != IXL_PREP_SUCCESS) {
155 			TNF_PROBE_1_DEBUG(hci1394_ixl_update_error,
156 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
157 			    "IXL_PREP_FAILURE");
158 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
159 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
160 			return (DDI_FAILURE);
161 		}
162 		/* if no action or update done, return update successful */
163 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
164 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
165 		return (DDI_SUCCESS);
166 	}
167 
168 	/* reserve the interrupt context for update processing */
169 	ret = hci1394_ixl_update_enable(&uv);
170 	if (ret != DDI_SUCCESS) {
171 
172 		/* error acquiring control of context - return */
173 		*resultp = uv.upd_status;
174 
175 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
176 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
177 		return (DDI_FAILURE);
178 	}
179 
180 	/* perform update risk analysis */
181 	if (hci1394_ixl_update_analysis(&uv) != DDI_SUCCESS) {
182 		/*
183 		 * return, if excessive risk or dma execution processing lost
184 		 * (note: caller risk override not yet implemented)
185 		 */
186 
187 		/* attempt intr processing cleanup, unless err is dmalost */
188 		if (uv.upd_status != IXL1394_EPRE_UPD_DMALOST) {
189 			(void) hci1394_ixl_update_endup(&uv);
190 		} else {
191 			/*
192 			 * error is dmalost, just release interrupt context.
193 			 * take the lock here to ensure an atomic read, modify,
194 			 * write of the "intr_flags" field while we try to
195 			 * clear the "in update" flag.  protects from the
196 			 * interrupt routine.
197 			 */
198 			mutex_enter(&ctxtp->intrprocmutex);
199 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
200 			mutex_exit(&ctxtp->intrprocmutex);
201 		}
202 		*resultp = uv.upd_status;
203 
204 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
205 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
206 		return (DDI_FAILURE);
207 	}
208 
209 
210 	/* perform requested update */
211 	if (hci1394_ixl_update_perform(&uv) != DDI_SUCCESS) {
212 		/*
213 		 * if non-completion condition, return update status
214 		 * attempt interrupt processing cleanup first
215 		 */
216 		(void) hci1394_ixl_update_endup(&uv);
217 
218 		*resultp = uv.upd_status;
219 
220 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
221 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
222 		return (DDI_FAILURE);
223 	}
224 
225 	/* evaluate update completion, setting completion status */
226 	if (hci1394_ixl_update_evaluate(&uv) != DDI_SUCCESS) {
227 		/*
228 		 * update failed; just release the interrupt context.
229 		 * take the lock here too (just like above) to ensure an
230 		 * atomic read, modify, write of the "intr_flags" field
231 		 * while we try to clear the "in update" flag.  protects
232 		 * from the interrupt routine.
233 		 */
234 		mutex_enter(&ctxtp->intrprocmutex);
235 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
236 		mutex_exit(&ctxtp->intrprocmutex);
237 
238 		/* if DMA stopped or lost, formally stop context */
239 		if (uv.upd_status == HCI1394_IXL_INTR_DMASTOP) {
240 			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
241 			    ID1394_DONE);
242 		} else if (uv.upd_status == HCI1394_IXL_INTR_DMALOST) {
243 			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
244 			    ID1394_FAIL);
245 		}
246 
247 		*resultp = uv.upd_status;
248 
249 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
250 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
251 		return (DDI_FAILURE);
252 	}
253 
254 	/* perform interrupt processing cleanup */
255 	uv.upd_status = hci1394_ixl_update_endup(&uv);
256 
257 	/* return update completion status */
258 	*resultp = uv.upd_status;
259 
260 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit, HCI1394_TNF_HAL_STACK_ISOCH,
261 	    "");
262 	return (DDI_SUCCESS);
263 }
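
/*
 * Illustrative sketch only, not part of the original driver (the
 * wrapper name and its jump/label arguments are hypothetical): how a
 * target driver might retarget a compiled, possibly running, IXL jump
 * command.  New and old commands must carry the same updatable opcode
 * (here IXL1394_OP_JUMP_U); on failure, *resultp holds an ixl1394
 * error code.
 */
#ifdef	HCI1394_IXL_UPDATE_EXAMPLE
static int
example_retarget_jump(hci1394_state_t *soft_statep,
    hci1394_iso_ctxt_t *ctxtp, ixl1394_jump_t *old_jumpp,
    ixl1394_command_t *new_labelp)
{
	ixl1394_jump_t	new_jump;
	int		result;

	new_jump = *old_jumpp;		/* same opcode: IXL1394_OP_JUMP_U */
	new_jump.label = new_labelp;	/* new IXL1394_OP_LABEL destination */

	/* riskoverride arg is 0: caller override not yet implemented */
	if (hci1394_ixl_update(soft_statep, ctxtp,
	    (ixl1394_command_t *)&new_jump, (ixl1394_command_t *)old_jumpp,
	    0, &result) != DDI_SUCCESS) {
		return (result);	/* ixl1394 error code */
	}
	return (0);
}
#endif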
264 
265 /*
266  * hci1394_ixl_update_enable
267  *	Used to coordinate dynamic update activities with simultaneous
268  *	interrupt handler processing, while holding the context mutex
269  *      for as short a time as possible.
270  */
271 static int
272 hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp)
273 {
274 	int	status;
275 	boolean_t retry;
276 	uint_t	remretries;
277 
278 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_enter,
279 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
280 
281 	retry = B_TRUE;
282 	/* set arbitrary number of retries before giving up */
283 	remretries = hci1394_upd_retries_before_fail;
284 	status = DDI_SUCCESS;
285 
286 	/*
287 	 * if we waited for completion of a callback generated by interrupt
288 	 * processing, retry here
289 	 */
290 	ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
291 	mutex_enter(&uvp->ctxtp->intrprocmutex);
292 
293 	while (retry == B_TRUE) {
294 		retry = B_FALSE;
295 		remretries--;
296 
297 		/* failure if update processing is already in progress */
298 		if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
299 			uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
300 			status = DDI_FAILURE;
301 		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
302 			/*
303 			 * if we have retried the max number of times, or if
304 			 * this update request is on the interrupt stack (i.e.
305 			 * the target driver's callback function initiated the
306 			 * update), set update failure.
307 			 */
308 			if ((remretries <= 0) ||
309 			    (servicing_interrupt())) {
310 				uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
311 				status = DDI_FAILURE;
312 			} else {
313 				/*
314 				 * if not on interrupt stack and retries not
315 				 * exhausted, free mutex, wait a short time
316 				 * and then retry.
317 				 */
318 				retry = B_TRUE;
319 				mutex_exit(&uvp->ctxtp->intrprocmutex);
320 				drv_usecwait(1);
321 				mutex_enter(&uvp->ctxtp->intrprocmutex);
322 				continue;
323 			}
324 		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
325 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
326 			status = DDI_FAILURE;
327 		}
328 	}
329 
330 	/* if context is available, reserve it for this update request */
331 	if (status == DDI_SUCCESS) {
332 		uvp->ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
333 	}
334 
335 	ASSERT(MUTEX_HELD(&uvp->ctxtp->intrprocmutex));
336 	mutex_exit(&uvp->ctxtp->intrprocmutex);
337 
338 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_exit,
339 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
340 	return (status);
341 }
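
/*
 * Summary of the intr_flags handshake (inferred from update_enable()
 * above and update_endup() below):
 *
 *	HCI1394_ISO_CTXT_INUPDATE  set under intrprocmutex to reserve the
 *	    context for this update; cleared when the update completes or
 *	    fails.
 *	HCI1394_ISO_CTXT_ININTR    interrupt processing is active; the
 *	    update busy-waits and retries, up to
 *	    hci1394_upd_retries_before_fail times, unless it is itself on
 *	    the interrupt stack.
 *	HCI1394_ISO_CTXT_INTRSET   an interrupt arrived while INUPDATE was
 *	    held; update_endup() runs the deferred hci1394_ixl_dma_sync()
 *	    on its behalf.
 *	HCI1394_ISO_CTXT_INCALL    context is busy in a target callback;
 *	    the update is refused with IXL1394_EINTERNAL_ERROR.
 */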
342 
343 /*
344  * hci1394_ixl_update_endup()
345  *    The ending stage of coordinating with simultaneously running interrupts.
346  *    Performs interrupt processing sync tasks if we (the update) had
347  *    blocked the interrupt routine out when it wanted a turn.
348  */
349 static int
350 hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp)
351 {
352 	uint_t status;
353 	hci1394_iso_ctxt_t *ctxtp;
354 
355 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_enter,
356 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
357 
358 	status = HCI1394_IXL_INTR_NOERROR;
359 	ctxtp = uvp->ctxtp;
360 
361 	while (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
362 
363 		if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET) {
364 			/*
365 			 * We don't need to grab the lock here because
366 			 * the "intr_flags" field is only modified in two
367 			 * ways - one in UPDATE and one in INTR routine. Since
368 			 * we know that it can't be modified simultaneously
369 			 * in another UPDATE thread - that is assured by the
370 			 * checks in "update_enable" - we would only be trying
371 			 * to protect against the INTR thread.  And since we
372 			 * are going to clear a bit here (and check it again
373 			 * at the top of the loop) we are not really concerned
374 			 * about missing its being set by the INTR routine.
375 			 */
376 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
377 
378 			status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
379 			if (status == HCI1394_IXL_INTR_DMALOST) {
380 				/*
381 				 * Unlike above, we do care here as we are
382 				 * trying to clear the "in update" flag, and
383 				 * we don't want that lost because the INTR
384 				 * routine is trying to set its flag.
385 				 */
386 				mutex_enter(&uvp->ctxtp->intrprocmutex);
387 				ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
388 				mutex_exit(&uvp->ctxtp->intrprocmutex);
389 				continue;
390 			}
391 		}
392 
393 		ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
394 		mutex_enter(&uvp->ctxtp->intrprocmutex);
395 		if (!(ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET)) {
396 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
397 		}
398 		mutex_exit(&uvp->ctxtp->intrprocmutex);
399 	}
400 
401 	/* if DMA stopped or lost, formally stop context */
402 	if (status == HCI1394_IXL_INTR_DMASTOP) {
403 		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_DONE);
404 	} else if (status == HCI1394_IXL_INTR_DMALOST) {
405 		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
406 	}
407 
408 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_exit,
409 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
410 	return (status);
411 }
412 
413 /*
414  * hci1394_ixl_update_prepare()
415  *    Preparation for the actual update (using temp uvp struct)
416  */
417 static int
418 hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp)
419 {
420 	int		    ret;
421 
422 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_enter,
423 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
424 
425 	/* new and old ixl commands must have the same opcode */
426 	if (uvp->ixlnewp->ixl_opcode != uvp->ixloldp->ixl_opcode) {
427 
428 		uvp->upd_status = IXL1394_EOPCODE_MISMATCH;
429 
430 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prepare_exit,
431 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
432 		    "EOPCODE_MISMATCH");
433 		return (IXL_PREP_FAILURE);
434 	}
435 
436 	/*
437 	 * perform evaluation and prepare update based on specific
438 	 * IXL command type
439 	 */
440 	switch (uvp->ixl_opcode) {
441 
442 	case IXL1394_OP_CALLBACK_U: {
443 		ixl1394_callback_t *old_callback_ixlp;
444 		ixl1394_callback_t *new_callback_ixlp;
445 
446 		old_callback_ixlp = (ixl1394_callback_t *)uvp->ixloldp;
447 		new_callback_ixlp = (ixl1394_callback_t *)uvp->ixlnewp;
448 
449 		/* perform update now without further evaluation */
450 		old_callback_ixlp->callback_arg =
451 		    new_callback_ixlp->callback_arg;
452 		old_callback_ixlp->callback = new_callback_ixlp->callback;
453 
454 		/* nothing else to do, return with done ok status */
455 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
456 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
457 		return (IXL_PREP_SUCCESS);
458 	}
459 
460 	case IXL1394_OP_JUMP_U:
461 		ret = hci1394_ixl_update_prep_jump(uvp);
462 
463 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
464 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
465 		return (ret);
466 
467 	case IXL1394_OP_SET_SKIPMODE_U:
468 		ret = hci1394_ixl_update_prep_set_skipmode(uvp);
469 
470 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
471 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
472 		return (ret);
473 
474 	case IXL1394_OP_SET_TAGSYNC_U:
475 		ret = hci1394_ixl_update_prep_set_tagsync(uvp);
476 
477 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
478 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
479 		return (ret);
480 
481 	case IXL1394_OP_RECV_PKT_U:
482 	case IXL1394_OP_RECV_PKT_ST_U:
483 		ret = hci1394_ixl_update_prep_recv_pkt(uvp);
484 
485 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
486 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
487 		return (ret);
488 
489 	case IXL1394_OP_RECV_BUF_U:
490 		ret = hci1394_ixl_update_prep_recv_buf(uvp);
491 
492 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
493 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
494 		return (ret);
495 
496 	case IXL1394_OP_SEND_PKT_U:
497 	case IXL1394_OP_SEND_PKT_ST_U:
498 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
499 		ret = hci1394_ixl_update_prep_send_pkt(uvp);
500 
501 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
502 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
503 		return (ret);
504 
505 	case IXL1394_OP_SEND_BUF_U:
506 		ret = hci1394_ixl_update_prep_send_buf(uvp);
507 
508 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
509 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
510 		return (ret);
511 
512 	default:
513 		/* ixl command being updated must be one of above, else error */
514 		uvp->upd_status = IXL1394_EOPCODE_DISALLOWED;
515 
516 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
517 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
518 		return (IXL_PREP_FAILURE);
519 	}
520 }
521 
522 /*
523  * hci1394_ixl_update_prep_jump()
524  *    Preparation for update of an IXL1394_OP_JUMP_U command.
525  */
526 static int
527 hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp)
528 {
529 	ixl1394_jump_t	    *old_jump_ixlp;
530 	ixl1394_jump_t	    *new_jump_ixlp;
531 	ixl1394_command_t   *ixlp;
532 	hci1394_xfer_ctl_t  *xferctlp;
533 	hci1394_desc_t	    *hcidescp;
534 	uint_t		    cbcnt;
535 	ddi_acc_handle_t    acc_hdl;
536 	ddi_dma_handle_t    dma_hdl;
537 	uint32_t	    desc_hdr;
538 	int		    err;
539 
540 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_enter,
541 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
542 
543 	old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
544 	new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
545 
546 	/* check if any change between new and old ixl jump command */
547 	if (new_jump_ixlp->label == old_jump_ixlp->label) {
548 
549 		/* if none, return with done ok status */
550 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
551 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
552 		return (IXL_PREP_SUCCESS);
553 	}
554 
555 	/* new ixl jump command label must be ptr to valid ixl label or NULL */
556 	if ((new_jump_ixlp->label != NULL) &&
557 	    (new_jump_ixlp->label->ixl_opcode != IXL1394_OP_LABEL)) {
558 
559 		/* if not jumping to label, return an error */
560 		uvp->upd_status = IXL1394_EJUMP_NOT_TO_LABEL;
561 
562 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
563 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
564 		    "EJUMP_NOT_TO_LABEL");
565 		return (IXL_PREP_FAILURE);
566 	}
567 
568 	/*
569 	 * follow exec path from new ixl jump command label to determine new
570 	 * jump destination ixl xfer command
571 	 */
572 	(void) hci1394_ixl_find_next_exec_xfer(new_jump_ixlp->label, &cbcnt,
573 	    &ixlp);
574 	if (ixlp != NULL) {
575 		/*
576 		 * get the bound address of the first descriptor block reached
577 		 * by the jump destination.  (This descriptor is the first
578 		 * transfer command following the jumped-to label.)  Set the
579 		 * descriptor's address (with Z bits) into jumpaddr.
580 		 */
581 		uvp->jumpaddr = ((hci1394_xfer_ctl_t *)
582 		    ixlp->compiler_privatep)->dma[0].dma_bound;
583 	}
584 
585 	/*
586 	 * get associated xfer IXL command from compiler_privatep of old
587 	 * jump command
588 	 */
589 	if ((uvp->ixlxferp = (ixl1394_command_t *)
590 	    old_jump_ixlp->compiler_privatep) == NULL) {
591 
592 		/* if none, return an error */
593 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
594 
595 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
596 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
597 		    "EORIG_IXL_CORRUPTED");
598 		return (IXL_PREP_FAILURE);
599 	}
600 
601 	/*
602 	 * get the last descriptor of the associated IXL xfer command's
603 	 * last dma descriptor block, then get hcihdr from its hdr field,
604 	 * removing the interrupt enable bits
605 	 */
606 	xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep;
607 	hcidescp = (hci1394_desc_t *)xferctlp->dma[xferctlp->cnt - 1].dma_descp;
608 	acc_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
609 	dma_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
610 
611 	/* Sync the descriptor before we grab the header(s) */
612 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
613 	    DDI_DMA_SYNC_FORCPU);
614 	if (err != DDI_SUCCESS) {
615 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
616 
617 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
618 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
619 		    "EINTERNAL_ERROR: dma_sync() failed");
620 		return (IXL_PREP_FAILURE);
621 	}
622 
623 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
624 	uvp->hcihdr = desc_hdr & ~DESC_INTR_ENBL;
625 
626 	/* set depth to last dma descriptor block & update count to 1 */
627 	uvp->ixldepth = xferctlp->cnt - 1;
628 	uvp->ixlcount = 1;
629 
630 	/*
631 	 * if there is only one dma descriptor block and the IXL xfer command is
632 	 * initiated by a label, or if callbacks were found along the exec path
633 	 * to the new destination IXL xfer command, enable interrupt in hcihdr
634 	 */
635 	if (((xferctlp->cnt == 1) &&
636 	    ((xferctlp->ctl_flags & XCTL_LABELLED) != 0)) || (cbcnt != 0)) {
637 
638 		uvp->hcihdr |= DESC_INTR_ENBL;
639 	}
640 
641 	/* If either old or new destination was/is NULL, enable interrupt */
642 	if ((new_jump_ixlp->label == NULL) || (old_jump_ixlp->label == NULL)) {
643 		uvp->hcihdr |= DESC_INTR_ENBL;
644 	}
645 
646 	/*
647 	 * if xfer type is xmit and the skip mode for this xfer command is
648 	 * IXL1394_SKIP_TO_NEXT then set uvp->skipmode to IXL1394_SKIP_TO_NEXT
649 	 * and set uvp->skipaddr to uvp->jumpaddr and set uvp->hci_offset to
650 	 * offset from last dma descriptor to first dma descriptor
651 	 * (where skipaddr goes).
652 	 *
653 	 * update perform processing will have to set skip branch address to
654 	 * same location as jump destination in this case.
655 	 */
656 	uvp->skipmode = IXL1394_SKIP_TO_STOP;
657 	if ((uvp->ixlxferp->ixl_opcode & IXL1394_OPF_ONXMIT) != 0) {
658 
659 		if ((xferctlp->skipmodep && (((ixl1394_set_skipmode_t *)
660 		    xferctlp->skipmodep)->skipmode == IXL1394_SKIP_TO_NEXT)) ||
661 		    (uvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_NEXT)) {
662 
663 			uvp->skipmode = IXL1394_SKIP_TO_NEXT;
664 			uvp->skipaddr = uvp->jumpaddr;
665 
666 			/*
667 			 * calc hci_offset to first descriptor (where skipaddr
668 			 * goes) of dma descriptor block from current (last)
669 			 * descriptor of the descriptor block (accessed in
670 			 * xfer_ctl dma_descp of IXL xfer command)
671 			 */
			uvp->hci_offset = xferctlp->
			    dma[xferctlp->cnt - 1].dma_bound & DESC_Z_MASK;
672 			if (uvp->ixlxferp->ixl_opcode ==
673 			    IXL1394_OP_SEND_HDR_ONLY) {
674 				/*
675 				 * send header only is (Z bits - 2)
676 				 * descriptor components back from last one
677 				 */
678 				uvp->hci_offset -= 2;
679 			} else {
680 				/*
681 				 * all others are (Z bits - 1) descriptor
682 				 * components back from last component
683 				 */
684 				uvp->hci_offset -= 1;
685 			}
686 		}
687 	}
688 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
689 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
690 	return (IXL_PREP_READY);
691 }
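
/*
 * Sketch of a transmit descriptor block as assumed by the offset math
 * above (Z = 3 shown; dma_descp points at the last component):
 *
 *	dma_descp - 2	first component; the skip address lives in its
 *			branch field (this is where hci_offset lands)
 *	dma_descp - 1	middle component(s)
 *	dma_descp	last component; hcihdr and the jump/branch
 *			address are written here
 *
 * IXL1394_OP_SEND_HDR_ONLY blocks are the (Z bits - 2) special case.
 */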
692 
693 /*
694  * hci1394_ixl_update_prep_set_skipmode()
695  *    Preparation for update of an IXL1394_OP_SET_SKIPMODE_U command.
696  */
697 static int
698 hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp)
699 {
700 	ixl1394_set_skipmode_t	*old_set_skipmode_ixlp;
701 	ixl1394_set_skipmode_t	*new_set_skipmode_ixlp;
702 	ixl1394_command_t	*ixlp;
703 	hci1394_xfer_ctl_t	*xferctlp;
704 
705 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_enter,
706 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
707 
708 	old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
709 	new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
710 
711 	/* check if new set skipmode is change from old set skipmode */
712 	if (new_set_skipmode_ixlp->skipmode ==
713 	    old_set_skipmode_ixlp->skipmode) {
714 
715 		if ((new_set_skipmode_ixlp->skipmode !=
716 		    IXL1394_SKIP_TO_LABEL) ||
717 		    (old_set_skipmode_ixlp->label ==
718 		    new_set_skipmode_ixlp->label)) {
719 
720 			TNF_PROBE_0_DEBUG(
721 			    hci1394_ixl_update_prep_set_skipmode_exit,
722 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
723 
724 			/* No change, return with done ok status */
725 			return (IXL_PREP_SUCCESS);
726 		}
727 	}
728 
729 	/* find associated ixl xfer command by following old ixl links */
730 	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
731 	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
732 	    IXL1394_OPF_ISXFER) == 0) ||
733 	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {
734 
735 		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
736 	}
737 
738 	/* return an error if no ixl xfer command found */
739 	if (uvp->ixlxferp == NULL) {
740 
741 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
742 
743 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
744 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
745 		    errmsg, "EORIG_IXL_CORRUPTED");
746 		return (IXL_PREP_FAILURE);
747 	}
748 
749 	/*
750 	 * get Z bits (number of descriptor components in descriptor block)
751 	 * from a dma bound addr in the xfer_ctl struct of the IXL xfer command
752 	 */
753 	if ((xferctlp = (hci1394_xfer_ctl_t *)
754 	    uvp->ixlxferp->compiler_privatep) == NULL) {
755 
756 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
757 
758 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
759 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
760 		    "EORIG_IXL_CORRUPTED");
761 		return (IXL_PREP_FAILURE);
762 	}
763 	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
764 
765 	/*
766 	 * determine hci_offset to first component (where skipaddr goes) of
767 	 * dma descriptor block from current (last) descriptor component of
768 	 * descriptor block (accessed in xfer_ctl dma_descp of IXL xfer command)
769 	 */
770 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
771 		/*
772 		 * "send header only" is (Z bits - 2) descriptors back
773 		 * from last one
774 		 */
775 		uvp->hci_offset -= 2;
776 	} else {
777 		/*
778 		 * all others are (Z bits - 1) descriptors back from
779 		 * last descriptor.
780 		 */
781 		uvp->hci_offset -= 1;
782 	}
783 
784 	/* set depth to zero and count to update all dma descriptors */
785 	uvp->ixldepth = 0;
786 	uvp->ixlcount = xferctlp->cnt;
787 
788 	/* set new skipmode and validate */
789 	uvp->skipmode = new_set_skipmode_ixlp->skipmode;
790 
791 	if ((uvp->skipmode != IXL1394_SKIP_TO_NEXT) &&
792 	    (uvp->skipmode != IXL1394_SKIP_TO_SELF) &&
793 	    (uvp->skipmode != IXL1394_SKIP_TO_STOP) &&
794 	    (uvp->skipmode != IXL1394_SKIP_TO_LABEL)) {
795 
796 		/* return an error if invalid mode */
797 		uvp->upd_status = IXL1394_EBAD_SKIPMODE;
798 
799 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
800 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
801 		    errmsg, "EBAD_SKIPMODE");
802 		return (IXL_PREP_FAILURE);
803 	}
804 
805 	/* if mode is skip to label */
806 	if (uvp->skipmode == IXL1394_SKIP_TO_LABEL) {
807 
808 		/* verify label field is valid ixl label cmd */
809 		if ((new_set_skipmode_ixlp->label == NULL) ||
810 		    (new_set_skipmode_ixlp->label->ixl_opcode !=
811 		    IXL1394_OP_LABEL)) {
812 
813 			/* Error - not skipping to valid label */
814 			uvp->upd_status = IXL1394_EBAD_SKIP_LABEL;
815 
816 			TNF_PROBE_0_DEBUG(
817 			    hci1394_ixl_update_prep_set_skipmode_exit,
818 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
819 			return (IXL_PREP_FAILURE);
820 		}
821 
822 		/*
823 		 * follow new skip exec path after label to next xfer
824 		 * IXL command
825 		 */
826 		(void) hci1394_ixl_find_next_exec_xfer(
827 		    new_set_skipmode_ixlp->label, NULL, &ixlp);
828 
829 		/*
830 		 * set skip destination IXL xfer command.
831 		 * after update set into old set skip mode IXL compiler_privatep
832 		 */
833 		if ((uvp->skipxferp = ixlp) != NULL) {
834 			/*
835 			 * set skipaddr to be the first dma descriptor block's
836 			 * dma bound address w/Z bits
837 			 */
838 			xferctlp = (hci1394_xfer_ctl_t *)
839 			    ixlp->compiler_privatep;
840 			uvp->skipaddr = xferctlp->dma[0].dma_bound;
841 		}
842 	}
843 
844 	/*
845 	 * if mode is skip to next, get skipaddr for last dma descriptor block
846 	 */
847 	if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
848 		/* follow normal exec path to next xfer ixl command */
849 		(void) hci1394_ixl_find_next_exec_xfer(uvp->ixlxferp->next_ixlp,
850 		    NULL, &ixlp);
851 
852 		/*
853 		 * get skip_next destination IXL xfer command
854 		 * (for last iteration)
855 		 */
856 		if (ixlp != NULL) {
857 			/*
858 			 * set skipaddr to first dma descriptor block's
859 			 * dma bound address w/Z bits
860 			 */
861 			xferctlp = (hci1394_xfer_ctl_t *)
862 			    ixlp->compiler_privatep;
863 			uvp->skipaddr = xferctlp->dma[0].dma_bound;
864 		}
865 	}
866 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
867 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
868 	return (IXL_PREP_READY);
869 }
870 
871 /*
872  * hci1394_ixl_update_prep_set_tagsync()
873  *    Preparation for update of an IXL1394_OP_SET_TAGSYNC_U command.
874  */
875 static int
876 hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp)
877 {
878 	ixl1394_set_tagsync_t	*old_set_tagsync_ixlp;
879 	ixl1394_set_tagsync_t	*new_set_tagsync_ixlp;
880 	hci1394_xfer_ctl_t	*xferctlp;
881 
882 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_enter,
883 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
884 
885 	old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
886 	new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
887 
888 	/* check if new set tagsync is change from old set tagsync. */
889 	if ((new_set_tagsync_ixlp->tag == old_set_tagsync_ixlp->tag) &&
890 	    (new_set_tagsync_ixlp->sync == old_set_tagsync_ixlp->sync)) {
891 
892 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
893 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
894 
895 		/* no change, return with done ok status */
896 		return (IXL_PREP_SUCCESS);
897 	}
898 
899 	/* find associated IXL xfer command by following old ixl links */
900 	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
901 	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
902 	    IXL1394_OPF_ISXFER) == 0) ||
903 	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {
904 
905 		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
906 	}
907 
908 	/* return an error if no IXL xfer command found */
909 	if (uvp->ixlxferp == NULL) {
910 
911 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
912 
913 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
914 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
915 		return (IXL_PREP_FAILURE);
916 	}
917 
918 	/* is IXL xfer command an IXL1394_OP_SEND_NO_PKT? */
919 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_NO_PKT) {
920 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
921 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
922 
923 		/* no update needed, return done ok status */
924 		return (IXL_PREP_SUCCESS);
925 	}
926 
927 	/* build new pkthdr1 from new IXL tag/sync bits */
928 	uvp->pkthdr1 = (uvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
929 	    (new_set_tagsync_ixlp->tag << DESC_PKT_TAG_SHIFT) |
930 	    (uvp->ctxtp->isochan << DESC_PKT_CHAN_SHIFT) |
931 	    (new_set_tagsync_ixlp->sync << DESC_PKT_SY_SHIFT);
932 
933 	/*
934 	 * get Z bits (# of descriptor components in descriptor block) from
935 	 * any dma bound address in the xfer_ctl struct of the IXL xfer cmd
936 	 */
937 	if ((xferctlp = (hci1394_xfer_ctl_t *)
938 	    uvp->ixlxferp->compiler_privatep) == NULL) {
939 
940 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
941 
942 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
943 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
944 		return (IXL_PREP_FAILURE);
945 	}
946 	uvp->hdr_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
947 
948 	/*
949 	 * determine hdr_offset from the current(last) descriptor of the
950 	 * DMA descriptor block to the descriptor where pkthdr1 goes
951 	 * by examining IXL xfer command
952 	 */
953 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
954 		/*
955 		 * if IXL send header only, the current (last)
956 		 * descriptor is the one
957 		 */
958 		uvp->hdr_offset = 0;
959 	} else {
960 		/*
961 		 * all others are the first descriptor (Z bits - 1)
962 		 * back from the last
963 		 */
964 		uvp->hdr_offset -= 1;
965 	}
966 
967 	/* set depth to zero and count to update all dma descriptors */
968 	uvp->ixldepth = 0;
969 	uvp->ixlcount = xferctlp->cnt;
970 
971 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
972 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
973 	return (IXL_PREP_READY);
974 }
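
/*
 * Note on the pkthdr1 assembly above: only tag and sync come from the
 * new IXL command; speed (isospd) and channel (isochan) are properties
 * of the context, so a tagsync update cannot move a stream to a
 * different channel or speed.
 */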
975 
976 /*
977  * hci1394_ixl_update_prep_recv_pkt()
978  *    Preparation for update of an IXL1394_OP_RECV_PKT_U or
979  *    IXL1394_OP_RECV_PKT_ST_U command.
980  */
981 static int
982 hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp)
983 {
984 	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
985 	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
986 	hci1394_xfer_ctl_t *xferctlp;
987 	hci1394_desc_t	   *hcidescp;
988 	ddi_acc_handle_t   acc_hdl;
989 	ddi_dma_handle_t   dma_hdl;
990 	uint32_t	   desc_hdr;
991 	int		   err;
992 
993 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_enter,
994 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
995 
996 	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
997 	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
998 
999 	/* check if any change between new and old IXL xfer commands */
1000 	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
1001 	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
1002 	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
1003 	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
1004 
1005 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1006 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1007 
1008 		/* no change. return with done ok status */
1009 		return (IXL_PREP_SUCCESS);
1010 	}
1011 
1012 	/* if new IXL buffer addrs are null, return error */
1013 	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == 0) ||
1014 	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
1015 
1016 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1017 
1018 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1019 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1020 		return (IXL_PREP_FAILURE);
1021 	}
1022 
1023 	/* if IXL xfer command is not xfer start command */
1024 	if (uvp->ixl_opcode == IXL1394_OP_RECV_PKT_U) {
1025 		/*
1026 		 * find IXL xfer start command in the compiler_privatep of the
1027 		 * old IXL xfer command
1028 		 */
1029 		uvp->ixlxferp = (ixl1394_command_t *)
1030 		    uvp->ixloldp->compiler_privatep;
1031 
1032 		if (uvp->ixlxferp == NULL) {
1033 
1034 			/* Error - no IXL xfer start command found */
1035 			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1036 
1037 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1038 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1039 			return (IXL_PREP_FAILURE);
1040 		}
1041 	} else {
1042 		/* IXL xfer command is the IXL xfer start command */
1043 		uvp->ixlxferp = uvp->ixloldp;
1044 	}
1045 
1046 	/* check that xfer_ctl is present in the IXL xfer start command */
1047 	if ((xferctlp = (hci1394_xfer_ctl_t *)
1048 	    uvp->ixlxferp->compiler_privatep) == NULL) {
1049 
1050 		/* Error - no xfer_ctl struct found */
1051 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1052 
1053 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1054 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1055 		return (IXL_PREP_FAILURE);
1056 	}
1057 
1058 	/* set depth to zero and count to 1 to update dma descriptor */
1059 	uvp->ixldepth = 0;
1060 	uvp->ixlcount = 1;
1061 
1062 	/*
1063 	 * get Z bits (number of descriptors in descriptor block) from the DMA
1064 	 * bound address in the xfer_ctl struct of the IXL xfer start command.
1065 	 */
1066 	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
1067 
1068 	/*
1069 	 * set offset from the current(last) descriptor to the descriptor for
1070 	 * this packet command
1071 	 */
1072 	uvp->hci_offset -= (1 + uvp->ixloldp->compiler_resv);
1073 
1074 	/*
1075 	 * set bufsize to the new IXL xfer size, and bufaddr to the new
1076 	 * IXL xfer bufp
1077 	 */
1078 	uvp->bufsize = ((ixl1394_xfer_pkt_t *)uvp->ixlnewp)->size;
1079 	uvp->bufaddr = ((ixl1394_xfer_pkt_t *)
1080 	    uvp->ixlnewp)->ixl_buf.ixldmac_addr;
1081 
1082 	/*
1083 	 * update old hcihdr w/new bufsize, set hcistatus rescnt to
1084 	 * new bufsize
1085 	 */
1086 	hcidescp = (hci1394_desc_t *)xferctlp->dma[0].dma_descp -
1087 	    uvp->hci_offset;
1088 	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
1089 	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;
1090 
1091 	/* Sync the descriptor before we grab the header(s) */
1092 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
1093 	    DDI_DMA_SYNC_FORCPU);
1094 	if (err != DDI_SUCCESS) {
1095 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1096 
1097 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1098 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
1099 		    "EINTERNAL_ERROR: dma_sync() failed");
1100 		return (IXL_PREP_FAILURE);
1101 	}
1102 
1103 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1104 	uvp->hcihdr = desc_hdr;
1105 	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
1106 	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1107 	    DESC_HDR_REQCOUNT_MASK;
1108 	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
1109 	    DESC_ST_RESCOUNT_MASK;
1110 
1111 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
1112 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1113 	return (IXL_PREP_READY);
1114 }
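
/*
 * Worked sketch of the header rewrite above (2000 is an illustrative
 * value): for a new bufsize of 2000, reqcount in hcihdr and rescount
 * in hcistatus both become 2000; a full residual count means no data
 * has yet been received into the re-targeted buffer.
 */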
1115 
1116 /*
1117  * hci1394_ixl_update_prep_recv_buf()
1118  *    Preparation for update of an IXL1394_OP_RECV_BUF_U command.
1119  */
1120 static int
1121 hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp)
1122 {
1123 	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1124 	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1125 	hci1394_xfer_ctl_t *xferctlp;
1126 
1127 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_enter,
1128 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1129 
1130 	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1131 	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1132 
1133 	/* check if any change between new and old IXL xfer commands */
1134 	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
1135 	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
1136 	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
1137 	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
1138 
1139 		if (((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) != 0) ||
1140 		    (new_xfer_buf_ixlp->pkt_size ==
1141 		    old_xfer_buf_ixlp->pkt_size)) {
1142 
1143 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
1144 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1145 
1146 			/* no change. return with done ok status */
1147 			return (IXL_PREP_SUCCESS);
1148 		}
1149 	}
1150 
1151 	/* if new IXL buffer addrs are null, return error */
1152 	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == 0) ||
1153 	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {
1154 
1155 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1156 
1157 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
1158 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1159 		return (IXL_PREP_FAILURE);
1160 	}
1161 
1162 	/*
1163 	 * if not buffer fill mode, check that the new pkt_size > 0 and
1164 	 * new size/pkt_size doesn't change the count of dma descriptor
1165 	 * blocks required
1166 	 */
1167 	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
1168 		if ((new_xfer_buf_ixlp->pkt_size == 0) ||
1169 	    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
1170 		    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
1171 
1172 			/* count changes. return an error */
1173 			uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
1174 
1175 			TNF_PROBE_0_DEBUG(
1176 			    hci1394_ixl_update_prep_recv_buf_exit,
1177 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1178 			return (IXL_PREP_FAILURE);
1179 		}
1180 	}
1181 
1182 	/* set old IXL xfer command as the current IXL xfer command */
1183 	uvp->ixlxferp = uvp->ixloldp;
1184 
1185 	/* check that the xfer_ctl struct is present in IXL xfer command */
1186 	if ((xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep)
1187 	    == NULL) {
1188 
1189 		/* return an error if no xfer_ctl struct is found for command */
1190 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1191 
1192 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
1193 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1194 		return (IXL_PREP_FAILURE);
1195 	}
1196 
1197 	/* set depth to zero and count to update all dma descriptors */
1198 	uvp->ixldepth = 0;
1199 	uvp->ixlcount = xferctlp->cnt;
1200 
1201 	/* set bufsize to new pkt_size (or to new size if buffer fill mode) */
1202 	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
1203 		uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
1204 	} else {
1205 		uvp->bufsize = new_xfer_buf_ixlp->size;
1206 	}
1207 
1208 	/* set bufaddr to new ixl_buf */
1209 	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1210 
1211 	/* set hcihdr reqcnt and hcistatus rescnt to new bufsize */
1212 	uvp->hci_offset = 0;
1213 	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1214 	    DESC_HDR_REQCOUNT_MASK;
1215 	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
1216 	    DESC_ST_RESCOUNT_MASK;
1217 
1218 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
1219 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1220 	return (IXL_PREP_READY);
1221 }
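
/*
 * Worked example of the size/pkt_size check above (values are
 * illustrative): a packet-per-buffer RECV_BUF compiled with size 4096
 * and pkt_size 512 spans 4096 / 512 = 8 packet descriptors, so an
 * update to size 2048 / pkt_size 256 (still 8) passes, while size
 * 4096 / pkt_size 256 (16) fails with IXL1394_EXFER_BUF_CNT_DIFF.
 */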
1222 
1223 /*
1224  * hci1394_ixl_update_prep_send_pkt()
1225  *    Preparation for update of an IXL1394_OP_SEND_PKT_U command,
1226  *    IXL1394_OP_SEND_PKT_ST_U command and IXL1394_OP_SEND_PKT_WHDR_ST_U
1227  *    command.
1228  */
1229 static int
1230 hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp)
1231 {
1232 	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1233 	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1234 	hci1394_xfer_ctl_t *xferctlp;
1235 	hci1394_desc_imm_t *hcidescp;
1236 	ddi_acc_handle_t   acc_hdl;
1237 	ddi_dma_handle_t   dma_hdl;
1238 	uint32_t	   desc_hdr, desc_hdr2;
1239 	int		   err;
1240 
1241 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_enter,
1242 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1243 
1244 	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1245 	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1246 
1247 	/* check if any change between new and old IXL xfer commands */
1248 	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
1249 	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
1250 	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
1251 	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
1252 
1253 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1254 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1255 
1256 		/* if none, return with done ok status */
1257 		return (IXL_PREP_SUCCESS);
1258 	}
1259 
1260 	/* if new ixl buffer addrs are null, return error */
1261 	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == 0) ||
1262 	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
1263 
1264 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1265 
1266 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1267 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1268 		return (IXL_PREP_FAILURE);
1269 	}
1270 
1271 	/* error if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode and size < 4 */
1272 	if ((uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) &&
1273 	    (new_xfer_pkt_ixlp->size < 4)) {
1274 
1275 		uvp->upd_status = IXL1394_EPKT_HDR_MISSING;
1276 
1277 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1278 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1279 		return (IXL_PREP_FAILURE);
1280 	}
1281 
1282 	/* if IXL xfer command is not an IXL xfer start command */
1283 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_U) {
1284 		/*
1285 		 * find IXL xfer start command in the compiler_privatep of the
1286 		 * old IXL xfer command
1287 		 */
1288 		uvp->ixlxferp = (ixl1394_command_t *)
1289 		    old_xfer_pkt_ixlp->compiler_privatep;
1290 
1291 		if (uvp->ixlxferp == NULL) {
1292 			/* error if no IXL xfer start command found */
1293 			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1294 
1295 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1296 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1297 			return (IXL_PREP_FAILURE);
1298 		}
1299 	} else {
1300 		/* IXL xfer command is the IXL xfer start command */
1301 		uvp->ixlxferp = uvp->ixloldp;
1302 	}
1303 
1304 	/*
1305 	 * get Z bits (number of descriptor components in the descriptor block)
1306 	 * from a dma bound address in the xfer_ctl structure of the IXL
1307 	 * xfer start command
1308 	 */
1309 	if ((xferctlp = (hci1394_xfer_ctl_t *)
1310 	    uvp->ixlxferp->compiler_privatep) == NULL) {
1311 
1312 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1313 
1314 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1315 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1316 		return (IXL_PREP_FAILURE);
1317 	}
1318 
1319 	/* set depth to zero and count to 1 to update dma descriptor */
1320 	uvp->ixldepth = 0;
1321 	uvp->ixlcount = 1;
1322 
1323 	/*
1324 	 * set offset to the header(first) descriptor from the
1325 	 * current(last) descriptor
1326 	 */
1327 	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
1328 
1329 	/*
1330 	 * set offset from the current(last) descriptor to the descriptor for
1331 	 * this packet command
1332 	 */
1333 	uvp->hci_offset = uvp->hdr_offset - 2 - uvp->ixloldp->compiler_resv;
1334 
1335 	/* set bufsize to new pkt buffer size, set bufaddr to new bufp */
1336 	uvp->bufsize = new_xfer_pkt_ixlp->size;
1337 	uvp->bufaddr = new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1338 
1339 	/*
1340 	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
1341 	 * step over hdr
1342 	 */
1343 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
1344 		uvp->bufsize -= 4;
1345 		uvp->bufaddr += 4;
1346 	}
1347 
1348 	/* update old hcihdr w/new bufsize */
1349 	hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
1350 	    uvp->hci_offset;
1351 	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
1352 	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;
1353 
1354 	/* Sync the descriptor before we grab the header(s) */
1355 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1356 	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1357 	if (err != DDI_SUCCESS) {
1358 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1359 
1360 		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1361 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
1362 		    "EINTERNAL_ERROR: dma_sync() failed");
1363 		return (IXL_PREP_FAILURE);
1364 	}
1365 
1366 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1367 	uvp->hcihdr = desc_hdr;
1368 	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
1369 	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1370 	    DESC_HDR_REQCOUNT_MASK;
1371 
1372 	/* update old pkthdr2 w/new bufsize. error if 16-bit datalen overflows */
1373 	desc_hdr2 = ddi_get32(acc_hdl, &hcidescp->q2);
1374 	uvp->pkthdr2 = desc_hdr2;
1375 	uvp->pkthdr2 = (uvp->pkthdr2 & DESC_PKT_DATALEN_MASK) >>
1376 	    DESC_PKT_DATALEN_SHIFT;
1377 	uvp->pkthdr2 -= old_xfer_pkt_ixlp->size;
1378 	uvp->pkthdr2 += uvp->bufsize;
1379 
1380 	if (uvp->pkthdr2 > 0xFFFF) {
1381 		uvp->upd_status = IXL1394_EPKTSIZE_MAX_OFLO;
1382 
1383 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1384 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1385 		return (IXL_PREP_FAILURE);
1386 	}
1387 	uvp->pkthdr2 = (uvp->pkthdr2 << DESC_PKT_DATALEN_SHIFT) &
1388 	    DESC_PKT_DATALEN_MASK;
1389 
1390 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
1391 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1392 	return (IXL_PREP_READY);
1393 }
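
/*
 * Worked sketch of the pkthdr2 datalen math above (illustrative
 * numbers): a block whose q2 datalen is 1024 for an old IXL size of
 * 1000, updated to size 1500, yields 1024 - 1000 + 1500 = 1524, which
 * is written back; any result over 0xFFFF (the 16-bit datalen field)
 * fails with IXL1394_EPKTSIZE_MAX_OFLO.
 */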
1394 
1395 /*
1396  * hci1394_ixl_update_prep_send_buf()
1397  *    Preparation for update of an IXL1394_OP_SEND_BUF_U command.
1398  */
1399 static int
1400 hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp)
1401 {
1402 	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1403 	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1404 	hci1394_xfer_ctl_t *xferctlp;
1405 
1406 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_enter,
1407 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1408 
1409 	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1410 	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1411 
1412 	/* check if any change between new and old IXL xfer commands */
1413 	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
1414 	    (new_xfer_buf_ixlp->pkt_size == old_xfer_buf_ixlp->pkt_size) &&
1415 	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
1416 	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
1417 	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
1418 
1419 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
1420 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1421 
1422 		/* no change, return with done ok status */
1423 		return (IXL_PREP_SUCCESS);
1424 	}
1425 
1426 	/* if new IXL buffer addresses are null, return error */
1427 	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == 0) ||
1428 	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {
1429 
1430 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1431 
1432 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
1433 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1434 		return (IXL_PREP_FAILURE);
1435 	}
1436 
1437 	/*
1438 	 * check that the new pkt_size > 0 and the new size/pkt_size
1439 	 * doesn't change the count of DMA descriptor blocks required
1440 	 */
1441 	if ((new_xfer_buf_ixlp->pkt_size == 0) ||
1442 	    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
1443 	    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
1444 
1445 		/* Error - new has different pkt count than old */
1446 		uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
1447 
1448 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
1449 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1450 		return (IXL_PREP_FAILURE);
1451 	}
1452 
1453 	/* set the old IXL xfer command as the current IXL xfer command */
1454 	uvp->ixlxferp = uvp->ixloldp;
1455 
1456 	/*
1457 	 * get Z bits (number of descriptor components in descriptor block)
1458 	 * from a DMA bound address in the xfer_ctl struct of the
1459 	 * IXL xfer command
1460 	 */
1461 	if ((xferctlp = (hci1394_xfer_ctl_t *)
1462 	    uvp->ixlxferp->compiler_privatep) == NULL) {
1463 
1464 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1465 
1466 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
1467 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1468 		return (IXL_PREP_FAILURE);
1469 	}
1470 
1471 	/* set depth to zero and count to update all dma descriptors */
1472 	uvp->ixldepth = 0;
1473 	uvp->ixlcount = xferctlp->cnt;
1474 
1475 	/*
1476 	 * set offset to the header(first) descriptor from the current (last)
1477 	 * descriptor.
1478 	 */
1479 	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
1480 
1481 	/* set offset to the only(last) xfer descriptor */
1482 	uvp->hci_offset = 0;
1483 
1484 	/* set bufsize to the new pkt_size, set bufaddr to the new bufp */
1485 	uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
1486 	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1487 
1488 	/*
1489 	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
1490 	 * step over header (a quadlet)
1491 	 */
1492 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
1493 		uvp->bufsize -= 4;
1494 		uvp->bufaddr += 4;
1495 	}
1496 
1497 	/* set hcihdr to new bufsize */
1498 	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1499 	    DESC_HDR_REQCOUNT_MASK;
1500 
1501 	/* set pkthdr2 to new bufsize */
1502 	uvp->pkthdr2 = (uvp->bufsize << DESC_PKT_DATALEN_SHIFT) &
1503 	    DESC_PKT_DATALEN_MASK;
1504 
1505 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
1506 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1507 	return (IXL_PREP_READY);
1508 }
1509 
1510 /*
1511  * hci1394_ixl_update_perform()
1512  *    performs the actual update into DMA memory.
1513  */
1514 static int
1515 hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp)
1516 {
1517 	int			ii;
1518 	uint_t			skipaddrlast;
1519 	hci1394_xfer_ctl_t	*xferctlp;
1520 	hci1394_desc_imm_t	*hcidescp;
1521 	hci1394_iso_ctxt_t	*ctxtp;
1522 	ddi_acc_handle_t	acc_hdl;
1523 	ddi_dma_handle_t	dma_hdl;
1524 	int			err;
1525 
1526 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_enter,
1527 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1528 
1529 	ctxtp = uvp->ctxtp;
1530 
1531 	/*
1532 	 * if no target ixl xfer command to be updated or it has
1533 	 * no xfer_ctl struct, then internal error.
1534 	 */
1535 	if ((uvp->ixlxferp == NULL) ||
1536 	    ((xferctlp = (hci1394_xfer_ctl_t *)
1537 	    uvp->ixlxferp->compiler_privatep) == NULL)) {
1538 
1539 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1540 
1541 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1542 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1543 
1544 		return (DDI_FAILURE);
1545 	}
1546 
1547 	/* perform update based on specific ixl command type */
1548 	switch (uvp->ixl_opcode) {
1549 
1550 	case IXL1394_OP_JUMP_U: {
1551 		ixl1394_jump_t *old_jump_ixlp;
1552 		ixl1394_jump_t *new_jump_ixlp;
1553 
1554 		old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
1555 		new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
1556 
1557 		/*
1558 		 * set new hdr and new branch fields into last component of last
1559 		 * dma descriptor block of ixl xfer cmd associated with
1560 		 * ixl jump cmd
1561 		 */
1562 		hcidescp = (hci1394_desc_imm_t *)
1563 		    xferctlp->dma[xferctlp->cnt - 1].dma_descp;
1564 		acc_hdl	 = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
1565 		dma_hdl	 =
1566 		    xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
1567 
1568 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1569 		ddi_put32(acc_hdl, &hcidescp->branch, uvp->jumpaddr);
1570 
1571 		/*
1572 		 * if xfer type is send and skip mode is IXL1394_SKIP_TO_NEXT
1573 		 * also set branch location into branch field of first
1574 		 * component (skip to address) of last dma descriptor block
1575 		 */
1576 		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1577 			hcidescp -= uvp->hci_offset;
1578 			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
1579 		}
1580 
1581 		/* Sync descriptor for device (desc was modified) */
1582 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1583 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1584 		if (err != DDI_SUCCESS) {
1585 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1586 
1587 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1588 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1589 			return (DDI_FAILURE);
1590 		}
1591 
1592 		/* set old ixl jump cmd label from new ixl jump cmd label */
1593 		old_jump_ixlp->label = new_jump_ixlp->label;
1594 		break;
1595 	}
1596 	case IXL1394_OP_SET_SKIPMODE_U: {
1597 		ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
1598 		ixl1394_set_skipmode_t *new_set_skipmode_ixlp;
1599 
1600 		old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
1601 		new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
1602 
1603 		/*
1604 		 * if skip-to-next mode, save the skip addr for the last iteration
1605 		 * through the dma descriptor blocks of the associated ixl xfer cmd
1606 		 */
1607 		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1608 			skipaddrlast = uvp->skipaddr;
1609 		}
1610 
1611 		/*
1612 		 * iterate through the set of dma descriptor blocks for the
1613 		 * associated ixl xfer start cmd, setting the new skip address
1614 		 * into the first hci descriptor of each.  for skip-to-next and
1615 		 * skip-to-self modes, first determine that address per iteration
1616 		 */
1617 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1618 			hcidescp = (hci1394_desc_imm_t *)
1619 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1620 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1621 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1622 
1623 			if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1624 				if (ii < (xferctlp->cnt - 1)) {
1625 					uvp->skipaddr =
1626 					    xferctlp->dma[ii + 1].dma_bound;
1627 				} else {
1628 					uvp->skipaddr = skipaddrlast;
1629 				}
1630 			} else if (uvp->skipmode == IXL1394_SKIP_TO_SELF) {
1631 				uvp->skipaddr = xferctlp->dma[ii].dma_bound;
1632 			}
1633 
1634 			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
1635 
1636 			/* Sync descriptor for device (desc was modified) */
1637 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1638 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1639 			if (err != DDI_SUCCESS) {
1640 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1641 
1642 				TNF_PROBE_0_DEBUG(
1643 				    hci1394_ixl_update_perform_exit,
1644 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1645 				return (DDI_FAILURE);
1646 			}
1647 		}
1648 
1649 		/*
1650 		 * set the old ixl set skip mode cmd's mode and label from the
1651 		 * new ixl cmd, and set the old cmd's compiler_privatep to
1652 		 * uvp->skipxferp
1653 		 */
1654 		old_set_skipmode_ixlp->skipmode = uvp->skipmode;
1655 		old_set_skipmode_ixlp->label = new_set_skipmode_ixlp->label;
1656 		old_set_skipmode_ixlp->compiler_privatep =
1657 		    (ixl1394_priv_t)uvp->skipxferp;
1658 		break;
1659 	}
1660 	case IXL1394_OP_SET_TAGSYNC_U: {
1661 		ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
1662 		ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
1663 
1664 		old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
1665 		new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
1666 
1667 		/*
1668 		 * iterate through the set of descriptor blocks for the
1669 		 * associated IXL xfer command and set the new pkthdr1 value
1670 		 * into the output more/last immediate hci descriptor (the
1671 		 * first/last hci descriptor of each descriptor block)
1672 		 */
1673 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1674 			hcidescp = (hci1394_desc_imm_t *)
1675 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1676 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1677 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1678 			ddi_put32(acc_hdl, &hcidescp->q1, uvp->pkthdr1);
1679 
1680 			/* Sync descriptor for device (desc was modified) */
1681 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1682 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1683 			if (err != DDI_SUCCESS) {
1684 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1685 
1686 				TNF_PROBE_0_DEBUG(
1687 				    hci1394_ixl_update_perform_exit,
1688 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1689 				return (DDI_FAILURE);
1690 			}
1691 		}
1692 
1693 		/*
1694 		 * set old ixl set tagsync cmd tag & sync from new ixl set
1695 		 * tagsync cmd
1696 		 */
1697 		old_set_tagsync_ixlp->tag = new_set_tagsync_ixlp->tag;
1698 		old_set_tagsync_ixlp->sync = new_set_tagsync_ixlp->sync;
1699 		break;
1700 	}
1701 	case IXL1394_OP_RECV_PKT_U:
1702 	case IXL1394_OP_RECV_PKT_ST_U: {
1703 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1704 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1705 		uint32_t	   desc_status;
1706 
1707 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1708 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1709 
1710 		/*
1711 		 * alter the buffer address, count and rescount in the hci
1712 		 * descriptor associated with this ixl recv pkt cmd
1713 		 */
1714 		hcidescp = (hci1394_desc_imm_t *)
1715 		    xferctlp->dma[0].dma_descp - uvp->hci_offset;
1716 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1717 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1718 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1719 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1720 
1721 		/* Sync the descriptor before we grab the status */
1722 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1723 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1724 		if (err != DDI_SUCCESS) {
1725 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1726 
1727 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1728 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1729 			return (DDI_FAILURE);
1730 		}
1731 
1732 		/* replace only the low halfword (resCount); leave status bits as-is */
1733 		desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1734 		desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1735 		    uvp->hcistatus;
1736 		ddi_put32(acc_hdl, &hcidescp->status, desc_status);
1737 
1738 		/* Sync descriptor for device (desc was modified) */
1739 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1740 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1741 		if (err != DDI_SUCCESS) {
1742 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1743 
1744 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1745 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1746 			return (DDI_FAILURE);
1747 		}
1748 
1749 		/*
1750 		 * set old ixl recv pkt size and buffers from new
1751 		 * ixl recv pkt command
1752 		 */
1753 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1754 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1755 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1756 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1757 		break;
1758 	}
1759 	case IXL1394_OP_RECV_BUF_U: {
1760 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1761 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1762 		uint32_t	   desc_hdr;
1763 		uint32_t	   desc_status;
1764 
1765 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1766 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1767 
1768 		/*
1769 		 * iterate through the set of descriptor blocks for this IXL
1770 		 * xfer command, altering the buffer address, count and rescount
1771 		 * in the input more/last (the only) hci descriptor of each block
1772 		 */
1773 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1774 
1775 			hcidescp = (hci1394_desc_imm_t *)
1776 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1777 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1778 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1779 
1780 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1781 
1782 			/*
1783 			 * advance to the next buffer segment, adjusting past
1784 			 * the header if appropriate
1785 			 */
1786 			uvp->bufaddr += uvp->bufsize;
1787 
1788 			/* Sync the descriptor before we grab the header(s) */
1789 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1790 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1791 			if (err != DDI_SUCCESS) {
1792 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1793 
1794 				TNF_PROBE_0_DEBUG(
1795 				    hci1394_ixl_update_perform_exit,
1796 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1797 				return (DDI_FAILURE);
1798 			}
1799 
1800 			/*
1801 			 * replace only reqCount; this preserves the interrupt
1802 			 * enable bits, et al. in each descriptor block header.
1803 			 */
1804 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1805 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1806 			    uvp->hcihdr;
1807 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
1808 
1809 			/*
1810 			 * replace only the low halfword (resCount), leaving
1811 			 * the status bits unchanged
1812 			 */
1813 			desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1814 			desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1815 			    uvp->hcistatus;
1816 			ddi_put32(acc_hdl, &hcidescp->status, desc_status);
1817 
1818 			/* Sync descriptor for device (desc was modified) */
1819 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1820 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1821 			if (err != DDI_SUCCESS) {
1822 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1823 
1824 				TNF_PROBE_0_DEBUG(
1825 				    hci1394_ixl_update_perform_exit,
1826 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1827 				return (DDI_FAILURE);
1828 			}
1829 		}
1830 
1831 		/*
1832 		 * set the old ixl recv buf sizes and buffers from the
1833 		 * new ixl recv buf cmd
1834 		 */
1835 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1836 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1837 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1838 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1839 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1840 		break;
1841 	}
1842 	case IXL1394_OP_SEND_PKT_U:
1843 	case IXL1394_OP_SEND_PKT_ST_U:
1844 	case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
1845 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1846 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1847 
1848 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1849 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1850 
1851 		/*
1852 		 * replace pkthdr2 in the output more immediate (the first) hci
1853 		 * descriptor of the block, then alter the buffer address and count
1854 		 * in the output more/last hci descriptor of this IXL send pkt cmd.
1855 		 */
1856 		hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
1857 		    uvp->hdr_offset;
1858 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1859 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1860 
1861 		ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1862 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1863 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1864 
1865 		/* Sync descriptor for device (desc was modified) */
1866 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1867 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1868 		if (err != DDI_SUCCESS) {
1869 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1870 
1871 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1872 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1873 			return (DDI_FAILURE);
1874 		}
1875 
1876 		/*
1877 		 * set the old ixl send pkt size and buffers from the
1878 		 * new ixl send pkt cmd
1879 		 */
1880 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1881 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1882 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1883 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1884 		break;
1885 	}
1886 	case IXL1394_OP_SEND_BUF_U: {
1887 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1888 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1889 		uint32_t	   desc_hdr;
1890 
1891 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1892 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1893 
1894 		/*
1895 		 * iterate through the set of descriptor blocks for this IXL
1896 		 * xfer command, replacing pkthdr2 in the output more immediate
1897 		 * (the first) hci descriptor of each block, then altering the
1898 		 * buffer address and count in the output last (the only other)
1899 		 * hci descriptor of the block.
1900 		 */
1901 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1902 			hcidescp = (hci1394_desc_imm_t *)
1903 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1904 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1905 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1906 
1907 			ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1908 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1909 
1910 			/* advance to next buffer segment */
1911 			uvp->bufaddr += uvp->bufsize;
1912 
1913 			/* Sync the descriptor before we grab the header(s) */
1914 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1915 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1916 			if (err != DDI_SUCCESS) {
1917 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1918 
1919 				TNF_PROBE_0_DEBUG(
1920 				    hci1394_ixl_update_perform_exit,
1921 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1922 				return (DDI_FAILURE);
1923 			}
1924 
1925 			/*
1926 			 * replace only reqCount; this preserves the interrupt
1927 			 * enable bits, et al. in each desc block hdr
1928 			 */
1929 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1930 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1931 			    uvp->hcihdr;
1932 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
1933 
1934 			/* Sync descriptor for device (desc was modified) */
1935 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1936 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1937 			if (err != DDI_SUCCESS) {
1938 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1939 
1940 				TNF_PROBE_0_DEBUG(
1941 				    hci1394_ixl_update_perform_exit,
1942 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1943 				return (DDI_FAILURE);
1944 			}
1945 		}
1946 
1947 		/*
1948 		 * set the old ixl send buf sizes and buffers from the
1949 		 * new ixl send buf cmd
1950 		 */
1951 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1952 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1953 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1954 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1955 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1956 		break;
1957 	}
1958 	default:
1959 		/* ixl command being updated must be one of above, else error */
1960 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1961 
1962 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1963 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1964 		return (DDI_FAILURE);
1965 	}
1966 
1967 	/* hit the WAKE bit in the context control register */
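	/*
	 * per the OHCI context control register definition, setting wake
	 * causes the controller to re-fetch the current descriptor, so the
	 * values just written take effect even if the context had stalled.
	 */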
1968 	if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
1969 		HCI1394_IRCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1970 		    0, 0, 0, 0, 0, 1 /* wake */);
1971 	} else {
1972 		HCI1394_ITCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1973 		    0, 0, 0, 1 /* wake */);
1974 	}
1975 
1976 	/* perform update completed successfully */
1977 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1978 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1979 	return (DDI_SUCCESS);
1980 }
1981 
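/*
 * Several of the cases above share one pattern for touching a live
 * descriptor: sync for the CPU, read-modify-write the hardware-owned
 * quadlet, then sync back for the device.  A minimal sketch of that
 * pattern as a hypothetical helper (not part of this driver):
 *
 *	static int
 *	hci1394_patch_desc_field(ddi_acc_handle_t acc, ddi_dma_handle_t dma,
 *	    hci1394_desc_imm_t *descp, uint32_t *fieldp, uint32_t mask,
 *	    uint32_t newbits)
 *	{
 *		uint32_t q;
 *
 *		// pull the descriptor in before reading hardware-owned bits
 *		if (ddi_dma_sync(dma, (off_t)descp,
 *		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU) !=
 *		    DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *
 *		q = ddi_get32(acc, fieldp);
 *		ddi_put32(acc, fieldp, (q & ~mask) | newbits);
 *
 *		// push the modified descriptor back to the device
 *		return (ddi_dma_sync(dma, (off_t)descp,
 *		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV));
 *	}
 */
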
1982 /*
1983  * hci1394_ixl_update_evaluate()
1984  *    Evaluate where the hardware is in running through the DMA descriptor
1985  *    blocks.
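 *    Called after the update has been performed: waits for the executing
 *    command pointer and depth to stabilize, then checks them against the
 *    locations saved before the update was performed.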
1986  */
1987 static int
1988 hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp)
1989 {
1990 	hci1394_iso_ctxt_t	*ctxtp;
1991 	ixl1394_command_t	*ixlp;
1992 	int			ixldepth;
1993 	int			ii;
1994 
1995 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_enter,
1996 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1997 
1998 	ctxtp = uvp->ctxtp;
1999 
2000 	ixlp = NULL;
2001 	ixldepth = 0xFFFFFFFF;
2002 
2003 	/*
2004 	 * repeat until the IXL execution status evaluation function returns an
2005 	 * error, or until the currently executing IXL command pointer and its
2006 	 * depth stabilize (both are primed above so the loop runs at least once)
2007 	 */
2008 	while ((ixlp != ctxtp->ixl_execp) ||
2009 	    (ixldepth != ctxtp->ixl_exec_depth)) {
2010 
2011 		ixlp = ctxtp->ixl_execp;
2012 		ixldepth = ctxtp->ixl_exec_depth;
2013 
2014 		/*
2015 		 * call the IXL execution status evaluation (ixl_dma_sync)
2016 		 * function, returning if it reports an error
2017 		 * (HCI1394_IXL_INTR_DMALOST is the only error condition).
2018 		 *
2019 		 * Note: interrupt processing function can only return one of
2020 		 * the following statuses here:
2021 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP,
2022 		 *    HCI1394_IXL_INTR_DMALOST
2023 		 *
2024 		 * it cannot return the following status here:
2025 		 *    HCI1394_IXL_INTR_NOADV
2026 		 *
2027 		 * Don't need to grab the lock here... for the same reason
2028 		 * explained in hci1394_ixl_update_endup() above.
2029 		 */
2030 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
2031 		if (hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp) ==
2032 		    HCI1394_IXL_INTR_DMALOST) {
2033 
2034 			/* return post-perform update failed status */
2035 			uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
2036 
2037 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2038 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2039 			return (DDI_FAILURE);
2040 		}
2041 	}
2042 
2043 	/*
2044 	 * if the currently executing IXL command is one of the IXL_MAX_LOCN
2045 	 * locations saved before update was performed, return update
2046 	 * successful status.
2047 	 */
2048 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2049 		if ((uvp->locn_info[ii].ixlp == ixlp) &&
2050 		    (uvp->locn_info[ii].ixldepth == ixldepth)) {
2051 
2052 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2053 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2054 			return (DDI_SUCCESS);
2055 		}
2056 	}
2057 
2058 	/*
2059 	 * else return post-perform update failed status.
2060 	 * note: later can make more sophisticated evaluations about where
2061 	 * execution processing went, and if update has really failed.
2062 	 */
2063 	uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
2064 
2065 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2066 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2067 	return (DDI_FAILURE);
2068 }
2069 
2070 /*
2071  * hci1394_ixl_update_analysis()
2072  *    Determine if the hardware is within the range we expected it to be.
2073  *    If so the update succeeded.
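 *    Runs before the update is performed: waits for the execution state to
 *    stabilize, saves the locations the hardware is expected to reach next,
 *    and rejects the update if the target command lies within that window.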
2074  */
2075 static int
2076 hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp)
2077 {
2078 	hci1394_iso_ctxt_t	*ctxtp;
2079 	ixl1394_command_t	*ixlp;
2080 	int			ixldepth;
2081 	int			ii;
2082 	int			status;
2083 
2084 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_enter,
2085 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2086 
2087 	ctxtp = uvp->ctxtp;
2088 
2089 	ixlp = NULL;
2090 	ixldepth = 0xFFFFFFFF;
2091 
2092 	/*
2093 	 * repeat until the ixl execution status evaluation function returns an
2094 	 * error, or until the currently executing ixl command pointer and its
2095 	 * depth stabilize.
2096 	 */
2097 	while ((ixlp != ctxtp->ixl_execp) ||
2098 	    (ixldepth != ctxtp->ixl_exec_depth)) {
2099 
2100 		ixlp = ctxtp->ixl_execp;
2101 		ixldepth = ctxtp->ixl_exec_depth;
2102 
2103 		/*
2104 		 * call ixl execution status evaluation (interrupt processing).
2105 		 * set IXL1394_EPRE_UPD_DMALOST if status is
2106 		 * HCI1394_IXL_INTR_DMALOST and return.
2107 		 *
2108 		 * Note: interrupt processing function can only return one of
2109 		 * the following statuses here:
2110 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP or
2111 		 *    HCI1394_IXL_INTR_DMALOST
2112 		 *
2113 		 * it cannot return the following status here:
2114 		 *    HCI1394_IXL_INTR_NOADV
2115 		 *
2116 		 * Don't need to grab the lock here... for the same reason
2117 		 * explained in hci1394_ixl_update_endup() above.
2118 		 */
2119 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
2120 
2121 		status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
2122 		if (status == HCI1394_IXL_INTR_DMALOST) {
2123 			/*
2124 			 * set pre-update dma processing lost status and
2125 			 * return error
2126 			 */
2127 			uvp->upd_status = IXL1394_EPRE_UPD_DMALOST;
2128 
2129 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2130 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2131 			return (DDI_FAILURE);
2132 		}
2133 	}
2134 
2135 	/*
2136 	 * save the locations of the currently executing ixl command and
2137 	 * the three following it (IXL_MAX_LOCN in all).
2138 	 */
2139 	hci1394_ixl_update_set_locn_info(uvp);
2140 
2141 	/*
2142 	 * if the xfer ixl cmd associated with the IXL command being updated is
2143 	 * one of the saved (currently executing) IXL commands, the risk is too
2144 	 * great to perform the update now; set IXL1394_ERISK_PROHIBITS_UPD
2145 	 * status and return an error.
2146 	 *
2147 	 * Note: more sophisticated risk override evaluations and processing
2148 	 * can be implemented later.
2149 	 */
2150 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2151 
2152 		if ((uvp->locn_info[ii].ixlp == uvp->ixlxferp) &&
2153 		    (uvp->locn_info[ii].ixldepth >= uvp->ixldepth) &&
2154 		    (uvp->locn_info[ii].ixldepth <
2155 		    (uvp->ixldepth + uvp->ixlcount))) {
2156 
2157 			uvp->upd_status = IXL1394_ERISK_PROHIBITS_UPD;
2158 
2159 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2160 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2161 			return (DDI_FAILURE);
2162 		}
2163 	}
2164 
2165 	/* it is safe for the update to be performed; return ok status */
2166 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2167 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2168 	return (DDI_SUCCESS);
2169 }
2170 
2171 /*
2172  * hci1394_ixl_update_set_locn_info()
2173  *    set up the local list of the IXL_MAX_LOCN next commandPtr locations we
2174  *    expect the hardware to get to in the next 125 microseconds.
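 *    (125 microseconds is the period of one IEEE 1394 isochronous cycle.)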
2175  */
2176 static void
2177 hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp)
2178 {
2179 	hci1394_iso_ctxt_t	*ctxtp;
2180 	ixl1394_command_t	*ixlp;
2181 	int			ixldepth;
2182 	int			ii;
2183 
2184 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_enter,
2185 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2186 
2187 	/*
2188 	 * find next xfer start ixl command, starting with current ixl command
2189 	 * where execution last left off
2190 	 */
2191 	ctxtp = uvp->ctxtp;
2192 
2193 	ixldepth = ctxtp->ixl_exec_depth;
2194 	(void) hci1394_ixl_find_next_exec_xfer(ctxtp->ixl_execp, NULL, &ixlp);
2195 
2196 	/*
2197 	 * if the current IXL command wasn't an xfer start command, then reset
2198 	 * the depth to 0 for the xfer command found
2199 	 */
2200 	if (ixlp != ctxtp->ixl_execp)
2201 		ixldepth = 0;
2202 
2203 	/*
2204 	 * save xfer start IXL command & its depth and also save location and
2205 	 * depth of the next IXL_MAX_LOCN-1 xfer start IXL commands following
2206 	 * it (if any)
2207 	 */
2208 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2209 		uvp->locn_info[ii].ixlp = ixlp;
2210 		uvp->locn_info[ii].ixldepth = ixldepth;
2211 
2212 		if (ixlp) {
2213 			/*
2214 			 * if more dma descriptor blocks generated by this xfer
2215 			 * command still follow, use them; else find the next
2216 			 * xfer start IXL command and reset its depth to 0.
2217 			 */
2218 			if (++ixldepth >= ((hci1394_xfer_ctl_t *)
2219 			    ixlp->compiler_privatep)->cnt) {
2220 
2221 				(void) hci1394_ixl_find_next_exec_xfer(
2222 				    ixlp->next_ixlp, NULL, &ixlp);
2223 				ixldepth = 0;
2224 			}
2225 		}
2226 	}
2227 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_exit,
2228 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2229 }
2230