1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77     uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79     ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83     ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
91     uint32_t *, uint32_t *);
92 static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
93 
94 /*
95  * ql_isr
96  *	Process all INTX intr types.
97  *
98  * Input:
99  *	arg1:	adapter state pointer.
100  *
101  * Returns:
102  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
103  *
104  * Context:
105  *	Interrupt or Kernel context, no mailbox commands allowed.
106  */
107 /* ARGSUSED */
108 uint_t
109 ql_isr(caddr_t arg1)
110 {
111 	return (ql_isr_aif(arg1, 0));
112 }
113 
114 /*
115  * ql_isr_default
116  *	Process unknown/unvectored intr types
117  *
118  * Input:
119  *	arg1:	adapter state pointer.
120  *	arg2:	interrupt vector.
121  *
122  * Returns:
123  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
124  *
125  * Context:
126  *	Interrupt or Kernel context, no mailbox commands allowed.
127  */
128 /* ARGSUSED */
129 uint_t
130 ql_isr_default(caddr_t arg1, caddr_t arg2)
131 {
132 	ql_adapter_state_t	*ha = (void *)arg1;
133 
134 	EL(ha, "isr_default called: idx=%x\n", arg2);
135 	return (ql_isr_aif(arg1, arg2));
136 }
137 
138 /*
139  * ql_isr_aif
140  *	Process mailbox and I/O command completions.
141  *
142  * Input:
143  *	arg:	adapter state pointer.
144  *	intvec:	interrupt vector.
145  *
146  * Returns:
147  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
148  *
149  * Context:
150  *	Interrupt or Kernel context, no mailbox commands allowed.
151  */
152 /* ARGSUSED */
153 uint_t
154 ql_isr_aif(caddr_t arg, caddr_t intvec)
155 {
156 	uint16_t		mbx;
157 	uint32_t		stat;
158 	ql_adapter_state_t	*ha = (void *)arg;
159 	uint32_t		set_flags = 0;
160 	uint32_t		reset_flags = 0;
161 	ql_head_t		isr_done_q = {NULL, NULL};
162 	uint_t			rval = DDI_INTR_UNCLAIMED;
163 	int			spurious_intr = 0;
164 	boolean_t		intr = B_FALSE, daemon = B_FALSE;
165 	int			intr_loop = 4;
166 
167 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
168 
169 	QL_PM_LOCK(ha);
170 	if (ha->power_level != PM_LEVEL_D0) {
171 		/*
172 		 * Looks like we are about to go down soon, exit early.
173 		 */
174 		QL_PM_UNLOCK(ha);
175 		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
176 		return (DDI_INTR_UNCLAIMED);
177 	}
178 	ha->busy++;
179 	QL_PM_UNLOCK(ha);
180 
181 	/* Acquire interrupt lock. */
182 	INTR_LOCK(ha);
183 
184 	if (CFG_IST(ha, CFG_CTRL_2200)) {
185 		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
186 			/* Reset idle timer. */
187 			ha->idle_timer = 0;
188 			rval = DDI_INTR_CLAIMED;
189 			if (intr_loop) {
190 				intr_loop--;
191 			}
192 
193 			/* Special Fast Post 2200. */
194 			stat = 0;
195 			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
196 			    ha->flags & ONLINE) {
197 				ql_srb_t	*sp;
198 
199 				mbx = RD16_IO_REG(ha, mailbox[23]);
200 
201 				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
202 					/* Release mailbox registers. */
203 					WRT16_IO_REG(ha, semaphore, 0);
204 
205 					if (intr_loop) {
206 						WRT16_IO_REG(ha, hccr,
207 						    HC_CLR_RISC_INT);
208 					}
209 
210 					/* Get handle. */
211 					mbx >>= 4;
212 					stat = mbx & OSC_INDEX_MASK;
213 
214 					/* Validate handle. */
215 					sp = stat < MAX_OUTSTANDING_COMMANDS ?
216 					    ha->outstanding_cmds[stat] : NULL;
217 
218 					if (sp != NULL && (sp->handle & 0xfff)
219 					    == mbx) {
220 						ha->outstanding_cmds[stat] =
221 						    NULL;
222 						sp->handle = 0;
223 						sp->flags &=
224 						    ~SRB_IN_TOKEN_ARRAY;
225 
226 						/* Set completed status. */
227 						sp->flags |= SRB_ISP_COMPLETED;
228 
229 						/* Set completion status */
230 						sp->pkt->pkt_reason =
231 						    CS_COMPLETE;
232 
233 						ql_fast_fcp_post(sp);
234 					} else if (mbx !=
235 					    (QL_FCA_BRAND & 0xfff)) {
236 						if (sp == NULL) {
237 							EL(ha, "unknown IOCB"
238 							    " handle=%xh\n",
239 							    mbx);
240 						} else {
241 							EL(ha, "mismatch IOCB"
242 							    " handle pkt=%xh, "
243 							    "sp=%xh\n", mbx,
244 							    sp->handle & 0xfff);
245 						}
246 
247 						(void) ql_binary_fw_dump(ha,
248 						    FALSE);
249 
250 						if (!(ha->task_daemon_flags &
251 						    (ISP_ABORT_NEEDED |
252 						    ABORT_ISP_ACTIVE))) {
253 							EL(ha, "ISP Invalid "
254 							    "handle, "
255 							    "isp_abort_needed"
256 							    "\n");
257 							set_flags |=
258 							    ISP_ABORT_NEEDED;
259 						}
260 					}
261 				}
262 			}
263 
264 			if (stat == 0) {
265 				/* Check for mailbox interrupt. */
266 				mbx = RD16_IO_REG(ha, semaphore);
267 				if (mbx & BIT_0) {
268 					/* Release mailbox registers. */
269 					WRT16_IO_REG(ha, semaphore, 0);
270 
271 					/* Get mailbox data. */
272 					mbx = RD16_IO_REG(ha, mailbox[0]);
273 					if (mbx > 0x3fff && mbx < 0x8000) {
274 						ql_mbx_completion(ha, mbx,
275 						    &set_flags, &reset_flags,
276 						    intr_loop);
277 					} else if (mbx > 0x7fff &&
278 					    mbx < 0xc000) {
279 						ql_async_event(ha, mbx,
280 						    &isr_done_q, &set_flags,
281 						    &reset_flags, intr_loop);
282 					} else {
283 						EL(ha, "UNKNOWN interrupt "
284 						    "type\n");
285 						intr = B_TRUE;
286 					}
287 				} else {
288 					ha->isp_rsp_index = RD16_IO_REG(ha,
289 					    resp_in);
290 
291 					if (ha->isp_rsp_index !=
292 					    ha->rsp_ring_index) {
293 						ql_response_pkt(ha,
294 						    &isr_done_q, &set_flags,
295 						    &reset_flags, intr_loop);
296 					} else if (++spurious_intr ==
297 					    MAX_SPURIOUS_INTR) {
298 						/*
299 						 * Process excessive
300 						 * spurious intrrupts
301 						 */
302 						ql_spurious_intr(ha,
303 						    intr_loop);
304 						EL(ha, "excessive spurious "
305 						    "interrupts, "
306 						    "isp_abort_needed\n");
307 						set_flags |= ISP_ABORT_NEEDED;
308 					} else {
309 						intr = B_TRUE;
310 					}
311 				}
312 			}
313 
314 			/* Clear RISC interrupt */
315 			if (intr || intr_loop == 0) {
316 				intr = B_FALSE;
317 				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
318 			}
319 
320 			if (set_flags != 0 || reset_flags != 0) {
321 				TASK_DAEMON_LOCK(ha);
322 				ha->task_daemon_flags |= set_flags;
323 				ha->task_daemon_flags &= ~reset_flags;
324 				TASK_DAEMON_UNLOCK(ha);
325 				set_flags = 0;
326 				reset_flags = 0;
327 				daemon = B_TRUE;
328 			}
329 		}
330 	} else {
331 		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & RH_RISC_INT) {
332 			/* Capture FW defined interrupt info */
333 			mbx = MSW(stat);
334 
335 			/* Reset idle timer. */
336 			ha->idle_timer = 0;
337 			rval = DDI_INTR_CLAIMED;
338 			if (intr_loop) {
339 				intr_loop--;
340 			}
341 
342 			switch (stat & 0x1ff) {
343 			case ROM_MBX_SUCCESS:
344 			case ROM_MBX_ERR:
345 				ql_mbx_completion(ha, mbx, &set_flags,
346 				    &reset_flags, intr_loop);
347 
348 				/* Release mailbox registers. */
349 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
350 					WRT16_IO_REG(ha, semaphore, 0);
351 				}
352 				break;
353 
354 			case MBX_SUCCESS:
355 			case MBX_ERR:
356 				/* Sun FW, Release mailbox registers. */
357 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
358 					WRT16_IO_REG(ha, semaphore, 0);
359 				}
360 				ql_mbx_completion(ha, mbx, &set_flags,
361 				    &reset_flags, intr_loop);
362 				break;
363 
364 			case ASYNC_EVENT:
365 				/* Sun FW, Release mailbox registers. */
366 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
367 					WRT16_IO_REG(ha, semaphore, 0);
368 				}
369 				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
370 				    &set_flags, &reset_flags, intr_loop);
371 				break;
372 
373 			case RESP_UPDATE:
374 				if (mbx != ha->rsp_ring_index) {
375 					ha->isp_rsp_index = mbx;
376 					ql_response_pkt(ha, &isr_done_q,
377 					    &set_flags, &reset_flags,
378 					    intr_loop);
379 				} else if (++spurious_intr ==
380 				    MAX_SPURIOUS_INTR) {
381 					/* Process excessive spurious intr. */
382 					ql_spurious_intr(ha, intr_loop);
383 					EL(ha, "excessive spurious "
384 					    "interrupts, isp_abort_needed\n");
385 					set_flags |= ISP_ABORT_NEEDED;
386 				} else {
387 					intr = B_TRUE;
388 				}
389 				break;
390 
391 			case SCSI_FAST_POST_16:
392 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
393 				ql_async_event(ha, stat, &isr_done_q,
394 				    &set_flags, &reset_flags, intr_loop);
395 				break;
396 
397 			case SCSI_FAST_POST_32:
398 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
399 				ql_async_event(ha, stat, &isr_done_q,
400 				    &set_flags, &reset_flags, intr_loop);
401 				break;
402 
403 			case CTIO_FAST_POST:
404 				stat = (stat & 0xffff0000) |
405 				    MBA_CTIO_COMPLETION;
406 				ql_async_event(ha, stat, &isr_done_q,
407 				    &set_flags, &reset_flags, intr_loop);
408 				break;
409 
410 			case IP_FAST_POST_XMT:
411 				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
412 				ql_async_event(ha, stat, &isr_done_q,
413 				    &set_flags, &reset_flags, intr_loop);
414 				break;
415 
416 			case IP_FAST_POST_RCV:
417 				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
418 				ql_async_event(ha, stat, &isr_done_q,
419 				    &set_flags, &reset_flags, intr_loop);
420 				break;
421 
422 			case IP_FAST_POST_BRD:
423 				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
424 				ql_async_event(ha, stat, &isr_done_q,
425 				    &set_flags, &reset_flags, intr_loop);
426 				break;
427 
428 			case IP_FAST_POST_RCV_ALN:
429 				stat = (stat & 0xffff0000) |
430 				    MBA_IP_HDR_DATA_SPLIT;
431 				ql_async_event(ha, stat, &isr_done_q,
432 				    &set_flags, &reset_flags, intr_loop);
433 				break;
434 
435 			case ATIO_UPDATE:
436 				EL(ha, "unsupported ATIO queue update"
437 				    " interrupt, status=%xh\n", stat);
438 				intr = B_TRUE;
439 				break;
440 
441 			case ATIO_RESP_UPDATE:
442 				EL(ha, "unsupported ATIO response queue "
443 				    "update interrupt, status=%xh\n", stat);
444 				intr = B_TRUE;
445 				break;
446 
447 			default:
448 				ql_handle_uncommon_risc_intr(ha, stat,
449 				    &set_flags);
450 				intr = B_TRUE;
451 				break;
452 			}
453 
454 			/* Clear RISC interrupt */
455 			if (intr || intr_loop == 0) {
456 				intr = B_FALSE;
457 				CFG_IST(ha, CFG_CTRL_242581) ?
458 				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
459 				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
460 			}
461 
462 			if (set_flags != 0 || reset_flags != 0) {
463 				TASK_DAEMON_LOCK(ha);
464 				ha->task_daemon_flags |= set_flags;
465 				ha->task_daemon_flags &= ~reset_flags;
466 				TASK_DAEMON_UNLOCK(ha);
467 				set_flags = 0;
468 				reset_flags = 0;
469 				daemon = B_TRUE;
470 			}
471 
472 			if (ha->flags & PARITY_ERROR) {
473 				EL(ha, "parity/pause exit\n");
474 				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
475 				break;
476 			}
477 		}
478 	}
479 
480 	/* Process claimed interrupts during polls. */
481 	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
482 		ha->intr_claimed = B_FALSE;
483 		rval = DDI_INTR_CLAIMED;
484 	}
485 
486 	/* Release interrupt lock. */
487 	INTR_UNLOCK(ha);
488 
489 	if (daemon) {
490 		ql_awaken_task_daemon(ha, NULL, 0, 0);
491 	}
492 
493 	if (isr_done_q.first != NULL) {
494 		ql_done(isr_done_q.first);
495 	}
496 
497 	if (rval == DDI_INTR_CLAIMED) {
498 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
499 		ha->xioctl->TotalInterrupts++;
500 	} else {
501 		/*EMPTY*/
502 		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
503 		    ha->instance);
504 	}
505 
506 	QL_PM_LOCK(ha);
507 	ha->busy--;
508 	QL_PM_UNLOCK(ha);
509 
510 	return (rval);
511 }
512 
513 /*
514  * ql_handle_uncommon_risc_intr
515  *	Handle an uncommon RISC interrupt.
516  *
517  * Input:
518  *	ha:		adapter state pointer.
519  *	stat:		interrupt status
520  *
521  * Context:
522  *	Interrupt or Kernel context, no mailbox commands allowed.
523  */
524 static void
525 ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
526     uint32_t *set_flags)
527 {
528 	uint16_t	hccr_reg;
529 
530 	hccr_reg = RD16_IO_REG(ha, hccr);
531 
532 	if (stat & RH_RISC_PAUSED ||
533 	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
534 
535 		ADAPTER_STATE_LOCK(ha);
536 		ha->flags |= PARITY_ERROR;
537 		ADAPTER_STATE_UNLOCK(ha);
538 
539 		if (ha->parity_pause_errors == 0 ||
540 		    ha->parity_hccr_err != hccr_reg ||
541 		    ha->parity_stat_err != stat) {
542 			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
543 			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
544 			    ha->instance, hccr_reg, stat,
545 			    ha->parity_pause_errors);
546 			ha->parity_hccr_err = hccr_reg;
547 			ha->parity_stat_err = stat;
548 		}
549 
550 		EL(ha, "parity/pause error, isp_abort_needed\n");
551 
552 		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
553 			ql_reset_chip(ha);
554 		}
555 
556 		if (ha->parity_pause_errors == 0) {
557 			(void) ql_flash_errlog(ha, FLASH_ERRLOG_PARITY_ERR,
558 			    0, MSW(stat), LSW(stat));
559 		}
560 
561 		if (ha->parity_pause_errors < 0xffffffff) {
562 			ha->parity_pause_errors++;
563 		}
564 
565 		*set_flags |= ISP_ABORT_NEEDED;
566 
567 		/* Disable ISP interrupts. */
568 		WRT16_IO_REG(ha, ictrl, 0);
569 		ADAPTER_STATE_LOCK(ha);
570 		ha->flags &= ~INTERRUPTS_ENABLED;
571 		ADAPTER_STATE_UNLOCK(ha);
572 	} else {
573 		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
574 		    stat, hccr_reg);
575 	}
576 }
577 
578 /*
579  * ql_spurious_intr
580  *	Inform Solaris of spurious interrupts.
581  *
582  * Input:
583  *	ha:		adapter state pointer.
584  *	intr_clr:	early interrupt clear
585  *
586  * Context:
587  *	Interrupt or Kernel context, no mailbox commands allowed.
588  */
589 static void
590 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
591 {
592 	ddi_devstate_t	state;
593 
594 	EL(ha, "Spurious interrupt\n");
595 
596 	/* Disable ISP interrupts. */
597 	WRT16_IO_REG(ha, ictrl, 0);
598 	ADAPTER_STATE_LOCK(ha);
599 	ha->flags &= ~INTERRUPTS_ENABLED;
600 	ADAPTER_STATE_UNLOCK(ha);
601 
602 	/* Clear RISC interrupt */
603 	if (intr_clr) {
604 		CFG_IST(ha, CFG_CTRL_242581) ?
605 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
606 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
607 	}
608 
609 	state = ddi_get_devstate(ha->dip);
610 	if (state == DDI_DEVSTATE_UP) {
611 		/*EMPTY*/
612 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
613 		    DDI_DEVICE_FAULT, "spurious interrupts");
614 	}
615 }
616 
617 /*
618  * ql_mbx_completion
619  *	Processes mailbox completions.
620  *
621  * Input:
622  *	ha:		adapter state pointer.
623  *	mb0:		Mailbox 0 contents.
624  *	set_flags:	task daemon flags to set.
625  *	reset_flags:	task daemon flags to reset.
626  *	intr_clr:	early interrupt clear
627  *
628  * Context:
629  *	Interrupt context.
630  */
631 /* ARGSUSED */
632 static void
633 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
634     uint32_t *reset_flags, int intr_clr)
635 {
636 	uint32_t	index;
637 	uint16_t	cnt;
638 
639 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
640 
641 	/* Load return mailbox registers. */
642 	MBX_REGISTER_LOCK(ha);
643 
644 	if (ha->mcp != NULL) {
645 		ha->mcp->mb[0] = mb0;
646 		index = ha->mcp->in_mb & ~MBX_0;
647 
648 		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
649 			index >>= 1;
650 			if (index & MBX_0) {
651 				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
652 				    mailbox[cnt]);
653 			}
654 		}
655 
656 	} else {
657 		EL(ha, "mcp == NULL\n");
658 	}
659 
660 	if (intr_clr) {
661 		/* Clear RISC interrupt. */
662 		CFG_IST(ha, CFG_CTRL_242581) ?
663 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
664 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
665 	}
666 
667 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
668 	if (ha->flags & INTERRUPTS_ENABLED) {
669 		cv_broadcast(&ha->cv_mbx_intr);
670 	}
671 
672 	MBX_REGISTER_UNLOCK(ha);
673 
674 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
675 }
676 
677 /*
678  * ql_async_event
679  *	Processes asynchronous events.
680  *
681  * Input:
682  *	ha:		adapter state pointer.
683  *	mbx:		Mailbox 0 register.
684  *	done_q:		head pointer to done queue.
685  *	set_flags:	task daemon flags to set.
686  *	reset_flags:	task daemon flags to reset.
687  *	intr_clr:	early interrupt clear
688  *
689  * Context:
690  *	Interrupt or Kernel context, no mailbox commands allowed.
691  */
692 static void
693 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
694     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
695 {
696 	uint32_t		handle;
697 	uint32_t		index;
698 	uint16_t		cnt;
699 	uint16_t		mb[MAX_MBOX_COUNT];
700 	ql_srb_t		*sp;
701 	port_id_t		s_id;
702 	ql_tgt_t		*tq;
703 	boolean_t		intr = B_TRUE;
704 	ql_adapter_state_t	*vha;
705 
706 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
707 
708 	/* Setup to process fast completion. */
709 	mb[0] = LSW(mbx);
710 	switch (mb[0]) {
711 	case MBA_SCSI_COMPLETION:
712 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
713 		    RD16_IO_REG(ha, mailbox[2]));
714 		break;
715 
716 	case MBA_CMPLT_1_16BIT:
717 		handle = MSW(mbx);
718 		mb[0] = MBA_SCSI_COMPLETION;
719 		break;
720 
721 	case MBA_CMPLT_1_32BIT:
722 		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
723 		mb[0] = MBA_SCSI_COMPLETION;
724 		break;
725 
726 	case MBA_CTIO_COMPLETION:
727 	case MBA_IP_COMPLETION:
728 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
729 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
730 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
731 		mb[0] = MBA_SCSI_COMPLETION;
732 		break;
733 
734 	default:
735 		break;
736 	}
737 
738 	/* Handle asynchronous event */
739 	switch (mb[0]) {
740 	case MBA_SCSI_COMPLETION:
741 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
742 		    ha->instance);
743 
744 		if (intr_clr) {
745 			/* Clear RISC interrupt */
746 			CFG_IST(ha, CFG_CTRL_242581) ?
747 			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
748 			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
749 			intr = B_FALSE;
750 		}
751 
752 		if ((ha->flags & ONLINE) == 0) {
753 			break;
754 		}
755 
756 		/* Get handle. */
757 		index = handle & OSC_INDEX_MASK;
758 
759 		/* Validate handle. */
760 		sp = index < MAX_OUTSTANDING_COMMANDS ?
761 		    ha->outstanding_cmds[index] : NULL;
762 
763 		if (sp != NULL && sp->handle == handle) {
764 			ha->outstanding_cmds[index] = NULL;
765 			sp->handle = 0;
766 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
767 
768 			/* Set completed status. */
769 			sp->flags |= SRB_ISP_COMPLETED;
770 
771 			/* Set completion status */
772 			sp->pkt->pkt_reason = CS_COMPLETE;
773 
774 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
775 				/* Place block on done queue */
776 				ql_add_link_b(done_q, &sp->cmd);
777 			} else {
778 				ql_fast_fcp_post(sp);
779 			}
780 		} else if (handle != QL_FCA_BRAND) {
781 			if (sp == NULL) {
782 				EL(ha, "%xh unknown IOCB handle=%xh\n",
783 				    mb[0], handle);
784 			} else {
785 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
786 				    "sp=%xh\n", mb[0], handle, sp->handle);
787 			}
788 
789 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
790 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
791 			    RD16_IO_REG(ha, mailbox[1]),
792 			    RD16_IO_REG(ha, mailbox[2]),
793 			    RD16_IO_REG(ha, mailbox[3]),
794 			    RD16_IO_REG(ha, mailbox[6]),
795 			    RD16_IO_REG(ha, mailbox[7]));
796 
797 			(void) ql_binary_fw_dump(ha, FALSE);
798 
799 			if (!(ha->task_daemon_flags &
800 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
801 				EL(ha, "%xh ISP Invalid handle, "
802 				    "isp_abort_needed\n", mb[0]);
803 				*set_flags |= ISP_ABORT_NEEDED;
804 			}
805 		}
806 		break;
807 
808 	case MBA_RESET:		/* Reset */
809 		EL(ha, "%xh Reset received\n", mb[0]);
810 		*set_flags |= RESET_MARKER_NEEDED;
811 		break;
812 
813 	case MBA_SYSTEM_ERR:		/* System Error */
814 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
815 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
816 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
817 		mb[7] = RD16_IO_REG(ha, mailbox[7]);
818 
819 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
820 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
821 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
822 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
823 		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
824 		    RD16_IO_REG(ha, mailbox[6]), mb[7],
825 		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
826 		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
827 		    RD16_IO_REG(ha, mailbox[12]));
828 
829 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
830 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
831 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
832 		    mb[0], RD16_IO_REG(ha, mailbox[13]),
833 		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
834 		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
835 		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
836 		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
837 		    RD16_IO_REG(ha, mailbox[22]),
838 		    RD16_IO_REG(ha, mailbox[23]));
839 
840 		if (ha->reg_off->mbox_cnt > 24) {
841 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
842 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
843 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
844 			    RD16_IO_REG(ha, mailbox[24]),
845 			    RD16_IO_REG(ha, mailbox[25]),
846 			    RD16_IO_REG(ha, mailbox[26]),
847 			    RD16_IO_REG(ha, mailbox[27]),
848 			    RD16_IO_REG(ha, mailbox[28]),
849 			    RD16_IO_REG(ha, mailbox[29]),
850 			    RD16_IO_REG(ha, mailbox[30]),
851 			    RD16_IO_REG(ha, mailbox[31]));
852 		}
853 
854 		(void) ql_binary_fw_dump(ha, FALSE);
855 
856 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
857 		    mb[2], mb[3]);
858 
859 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
860 			ADAPTER_STATE_LOCK(ha);
861 			ha->flags |= MPI_RESET_NEEDED;
862 			ADAPTER_STATE_UNLOCK(ha);
863 		}
864 
865 		*set_flags |= ISP_ABORT_NEEDED;
866 		ha->xioctl->ControllerErrorCount++;
867 		break;
868 
869 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
870 		EL(ha, "%xh Request Transfer Error received, "
871 		    "isp_abort_needed\n", mb[0]);
872 
873 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
874 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
875 		    RD16_IO_REG(ha, mailbox[3]));
876 
877 		*set_flags |= ISP_ABORT_NEEDED;
878 		ha->xioctl->ControllerErrorCount++;
879 		break;
880 
881 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
882 		EL(ha, "%xh Response Transfer Error received,"
883 		    " isp_abort_needed\n", mb[0]);
884 
885 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
886 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
887 		    RD16_IO_REG(ha, mailbox[3]));
888 
889 		*set_flags |= ISP_ABORT_NEEDED;
890 		ha->xioctl->ControllerErrorCount++;
891 		break;
892 
893 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
894 		EL(ha, "%xh Request Queue Wake-up received\n",
895 		    mb[0]);
896 		break;
897 
898 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
899 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
900 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
901 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
902 
903 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
904 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
905 
906 		switch (mb[1]) {
907 		case MLA_LOGIN_OPERATIONAL_FW:
908 			ADAPTER_STATE_LOCK(ha);
909 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
910 			ADAPTER_STATE_UNLOCK(ha);
911 			break;
912 		case MLA_PANIC_RECOVERY:
913 		case MLA_LOGIN_DIAGNOSTIC_FW:
914 		case MLA_LOGIN_GOLDEN_FW:
915 		case MLA_REJECT_RESPONSE:
916 		default:
917 			break;
918 		}
919 		break;
920 
921 	case MBA_LIP_F8:	/* Received a LIP F8. */
922 	case MBA_LIP_RESET:	/* LIP reset occurred. */
923 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
924 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
925 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
926 			    "\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
927 			    RD16_IO_REG(ha, mailbox[2]));
928 		} else {
929 			EL(ha, "%xh LIP received\n", mb[0]);
930 		}
931 
932 		ADAPTER_STATE_LOCK(ha);
933 		ha->flags &= ~POINT_TO_POINT;
934 		ADAPTER_STATE_UNLOCK(ha);
935 
936 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
937 			*set_flags |= LOOP_DOWN;
938 		}
939 		ql_port_state(ha, FC_STATE_OFFLINE,
940 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
941 
942 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
943 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
944 		}
945 
946 		ha->adapter_stats->lip_count++;
947 
948 		/* Update AEN queue. */
949 		ha->xioctl->TotalLipResets++;
950 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
951 			ql_enqueue_aen(ha, mb[0], NULL);
952 		}
953 		break;
954 
955 	case MBA_LOOP_UP:
956 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
957 		    CFG_CTRL_242581))) {
958 			mb[1] = RD16_IO_REG(ha, mailbox[1]);
959 			if (mb[1] == IIDMA_RATE_1GB) {		/* 1GB */
960 				ha->state = FC_PORT_STATE_MASK(
961 				    ha->state) | FC_STATE_1GBIT_SPEED;
962 				index = 1;
963 			} else if (mb[1] == IIDMA_RATE_2GB) {	/* 2GB */
964 				ha->state = FC_PORT_STATE_MASK(
965 				    ha->state) | FC_STATE_2GBIT_SPEED;
966 				index = 2;
967 			} else if (mb[1] == IIDMA_RATE_4GB) {	/* 4GB */
968 				ha->state = FC_PORT_STATE_MASK(
969 				    ha->state) | FC_STATE_4GBIT_SPEED;
970 				index = 4;
971 			} else if (mb[1] == IIDMA_RATE_8GB) {	/* 8GB */
972 				ha->state = FC_PORT_STATE_MASK(
973 				    ha->state) | FC_STATE_8GBIT_SPEED;
974 				index = 8;
975 			} else if (mb[1] == IIDMA_RATE_10GB) {	/* 10GB */
976 				ha->state = FC_PORT_STATE_MASK(
977 				    ha->state) | FC_STATE_10GBIT_SPEED;
978 				index = 10;
979 			} else {
980 				ha->state = FC_PORT_STATE_MASK(
981 				    ha->state);
982 				index = 0;
983 			}
984 		} else {
985 			ha->state = FC_PORT_STATE_MASK(ha->state) |
986 			    FC_STATE_FULL_SPEED;
987 			index = 1;
988 		}
989 
990 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
991 			vha->state = FC_PORT_STATE_MASK(vha->state) |
992 			    FC_PORT_SPEED_MASK(ha->state);
993 		}
994 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
995 
996 		/* Update AEN queue. */
997 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
998 			ql_enqueue_aen(ha, mb[0], NULL);
999 		}
1000 		break;
1001 
1002 	case MBA_LOOP_DOWN:
1003 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1004 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1005 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]),
1006 		    RD16_IO_REG(ha, mailbox[4]));
1007 
1008 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1009 			*set_flags |= LOOP_DOWN;
1010 		}
1011 		ql_port_state(ha, FC_STATE_OFFLINE,
1012 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1013 
1014 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1015 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1016 		}
1017 
1018 		if (CFG_IST(ha, CFG_CTRL_2581)) {
1019 			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
1020 		}
1021 
1022 		/* Update AEN queue. */
1023 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1024 			ql_enqueue_aen(ha, mb[0], NULL);
1025 		}
1026 		break;
1027 
1028 	case MBA_PORT_UPDATE:
1029 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1030 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1031 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1032 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1033 
1034 		/* Locate port state structure. */
1035 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1036 			if (vha->vp_index == LSB(mb[3])) {
1037 				break;
1038 			}
1039 		}
1040 		if (vha == NULL) {
1041 			break;
1042 		}
1043 		/*
1044 		 * In N port 2 N port topology the FW provides a port
1045 		 * database entry at loop_id 0x7fe which we use to
1046 		 * acquire the Ports WWPN.
1047 		 */
1048 		if ((mb[1] != 0x7fe) &&
1049 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1050 		    (CFG_IST(ha, CFG_CTRL_242581) &&
1051 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1052 			EL(ha, "%xh Port Database Update, Login/Logout "
1053 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1054 			    mb[0], mb[1], mb[2], mb[3]);
1055 		} else if (CFG_IST(ha, CFG_CTRL_81XX) && mb[1] == 0xffff &&
1056 		    mb[2] == 0x7 && MSB(mb[3]) == 0xe) {
1057 			if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1058 				*set_flags |= LOOP_DOWN;
1059 			}
1060 			ql_port_state(ha, FC_STATE_OFFLINE,
1061 			    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1062 
1063 			if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1064 				ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1065 			}
1066 		} else {
1067 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1068 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1069 			    mb[3]);
1070 			*set_flags |= LOOP_RESYNC_NEEDED;
1071 			*set_flags &= ~LOOP_DOWN;
1072 			*reset_flags |= LOOP_DOWN;
1073 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1074 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1075 			TASK_DAEMON_LOCK(ha);
1076 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1077 			vha->task_daemon_flags &= ~LOOP_DOWN;
1078 			TASK_DAEMON_UNLOCK(ha);
1079 			ADAPTER_STATE_LOCK(ha);
1080 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1081 			ADAPTER_STATE_UNLOCK(ha);
1082 		}
1083 
1084 		/* Update AEN queue. */
1085 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1086 			ql_enqueue_aen(ha, mb[0], NULL);
1087 		}
1088 		break;
1089 
1090 	case MBA_RSCN_UPDATE:
1091 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1092 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1093 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1094 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1095 
1096 		/* Locate port state structure. */
1097 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1098 			if (vha->vp_index == LSB(mb[3])) {
1099 				break;
1100 			}
1101 		}
1102 
1103 		if (vha == NULL) {
1104 			break;
1105 		}
1106 
1107 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1108 		    MSB(mb[2]) == vha->d_id.b.area &&
1109 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1110 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1111 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1112 		} else {
1113 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1114 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1115 			if (FC_PORT_STATE_MASK(vha->state) !=
1116 			    FC_STATE_OFFLINE) {
1117 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1118 				TASK_DAEMON_LOCK(ha);
1119 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1120 				TASK_DAEMON_UNLOCK(ha);
1121 				*set_flags |= RSCN_UPDATE_NEEDED;
1122 			}
1123 		}
1124 
1125 		/* Update AEN queue. */
1126 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1127 			ql_enqueue_aen(ha, mb[0], NULL);
1128 		}
1129 		break;
1130 
1131 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1132 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1133 		    RD16_IO_REG(ha, mailbox[1]));
1134 		break;
1135 
1136 	case MBA_IP_RECEIVE:
1137 	case MBA_IP_BROADCAST:
1138 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1139 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1140 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
1141 
1142 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1143 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1144 
1145 		/* Locate device queue. */
1146 		s_id.b.al_pa = LSB(mb[2]);
1147 		s_id.b.area = MSB(mb[2]);
1148 		s_id.b.domain = LSB(mb[1]);
1149 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1150 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1151 			break;
1152 		}
1153 
1154 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1155 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1156 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1157 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1158 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1159 
1160 		tq->ub_sequence_length = mb[3];
1161 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1162 		if (mb[3] % cnt) {
1163 			tq->ub_total_seg_cnt++;
1164 		}
1165 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1166 
1167 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1168 		    index++) {
1169 			mb[index] = RD16_IO_REG(ha, mailbox[index]);
1170 		}
1171 
1172 		tq->ub_seq_id = ++ha->ub_seq_id;
1173 		tq->ub_seq_cnt = 0;
1174 		tq->ub_frame_ro = 0;
1175 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1176 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
1177 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1178 		ha->rcv_dev_q = tq;
1179 
1180 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1181 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1182 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1183 			    QL_SUCCESS) {
1184 				EL(ha, "ql_ub_frame_hdr failed, "
1185 				    "isp_abort_needed\n");
1186 				*set_flags |= ISP_ABORT_NEEDED;
1187 				break;
1188 			}
1189 		}
1190 		break;
1191 
1192 	case MBA_IP_LOW_WATER_MARK:
1193 	case MBA_IP_RCV_BUFFER_EMPTY:
1194 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1195 		    mb[0]);
1196 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1197 		break;
1198 
1199 	case MBA_IP_HDR_DATA_SPLIT:
1200 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1201 		break;
1202 
1203 	case MBA_ERROR_LOGGING_DISABLED:
1204 		EL(ha, "%xh error logging disabled received, "
1205 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]));
1206 		break;
1207 
1208 	case MBA_POINT_TO_POINT:
1209 	/* case MBA_DCBX_COMPLETED: */
1210 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1211 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1212 		} else {
1213 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1214 		}
1215 		ADAPTER_STATE_LOCK(ha);
1216 		ha->flags |= POINT_TO_POINT;
1217 		ADAPTER_STATE_UNLOCK(ha);
1218 		break;
1219 
1220 	case MBA_FCF_CONFIG_ERROR:
1221 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1222 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1223 		break;
1224 
1225 	case MBA_DCBX_PARAM_CHANGED:
1226 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1227 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1228 		break;
1229 
1230 	case MBA_CHG_IN_CONNECTION:
1231 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1232 		if (mb[1] == 2) {
1233 			EL(ha, "%xh Change In Connection received, "
1234 			    "mbx1=%xh\n",  mb[0], mb[1]);
1235 			ADAPTER_STATE_LOCK(ha);
1236 			ha->flags &= ~POINT_TO_POINT;
1237 			ADAPTER_STATE_UNLOCK(ha);
1238 			if (ha->topology & QL_N_PORT) {
1239 				ha->topology = (uint8_t)(ha->topology &
1240 				    ~QL_N_PORT);
1241 				ha->topology = (uint8_t)(ha->topology |
1242 				    QL_NL_PORT);
1243 			}
1244 		} else {
1245 			EL(ha, "%xh Change In Connection received, "
1246 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1247 			*set_flags |= ISP_ABORT_NEEDED;
1248 		}
1249 		break;
1250 
1251 	case MBA_ZIO_UPDATE:
1252 		EL(ha, "%xh ZIO response received\n", mb[0]);
1253 
1254 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1255 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1256 		intr = B_FALSE;
1257 		break;
1258 
1259 	case MBA_PORT_BYPASS_CHANGED:
1260 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1261 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1262 		/*
1263 		 * Event generated when there is a transition on
1264 		 * port bypass of crystal+.
1265 		 * Mailbox 1:	Bit 0 - External.
1266 		 *		Bit 2 - Internal.
1267 		 * When the bit is 0, the port is bypassed.
1268 		 *
1269 		 * For now we will generate a LIP for all cases.
1270 		 */
1271 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1272 		break;
1273 
1274 	case MBA_RECEIVE_ERROR:
1275 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1276 		    mb[0], RD16_IO_REG(ha, mailbox[1]),
1277 		    RD16_IO_REG(ha, mailbox[2]));
1278 		break;
1279 
1280 	case MBA_LS_RJT_SENT:
1281 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1282 		    RD16_IO_REG(ha, mailbox[1]));
1283 		break;
1284 
1285 	case MBA_FW_RESTART_COMP:
1286 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1287 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1288 		break;
1289 
1290 	case MBA_IDC_COMPLETE:
1291 		EL(ha, "%xh Inter-driver communication complete received, "
1292 		    "mbx1=%xh, mbx2=%xh\n", mb[0],
1293 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1294 		break;
1295 
1296 	case MBA_IDC_NOTIFICATION:
1297 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox[1]);
1298 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox[2]);
1299 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox[3]);
1300 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox[4]);
1301 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox[5]);
1302 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox[6]);
1303 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox[7]);
1304 		EL(ha, "%xh Inter-driver communication request notification "
1305 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1306 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1307 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1308 		    ha->idc_mb[6], ha->idc_mb[7]);
1309 		*set_flags |= IDC_ACK_NEEDED;
1310 		break;
1311 
1312 	case MBA_IDC_TIME_EXTENDED:
1313 		EL(ha, "%xh Inter-driver communication time extended received,"
1314 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1315 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1316 		break;
1317 
1318 	default:
1319 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1320 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1321 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
1322 		break;
1323 	}
1324 
1325 	/* Clear RISC interrupt */
1326 	if (intr && intr_clr) {
1327 		CFG_IST(ha, CFG_CTRL_242581) ?
1328 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1329 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1330 	}
1331 
1332 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1333 }
1334 
1335 /*
1336  * ql_fast_fcp_post
1337  *	Fast path for good SCSI I/O completion.
1338  *
1339  * Input:
1340  *	sp:	SRB pointer.
1341  *
1342  * Context:
1343  *	Interrupt or Kernel context, no mailbox commands allowed.
1344  */
1345 static void
1346 ql_fast_fcp_post(ql_srb_t *sp)
1347 {
1348 	ql_adapter_state_t	*ha = sp->ha;
1349 	ql_lun_t		*lq = sp->lun_queue;
1350 	ql_tgt_t		*tq = lq->target_queue;
1351 
1352 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1353 
1354 	/* Acquire device queue lock. */
1355 	DEVICE_QUEUE_LOCK(tq);
1356 
1357 	/* Decrement outstanding commands on device. */
1358 	if (tq->outcnt != 0) {
1359 		tq->outcnt--;
1360 	}
1361 
1362 	if (sp->flags & SRB_FCP_CMD_PKT) {
1363 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1364 			/*
1365 			 * Clear the flag for this LUN so that
1366 			 * untagged commands can be submitted
1367 			 * for it.
1368 			 */
1369 			lq->flags &= ~LQF_UNTAGGED_PENDING;
1370 		}
1371 
1372 		if (lq->lun_outcnt != 0) {
1373 			lq->lun_outcnt--;
1374 		}
1375 	}
1376 
1377 	/* Reset port down retry count on good completion. */
1378 	tq->port_down_retry_count = ha->port_down_retry_count;
1379 	tq->qfull_retry_count = ha->qfull_retry_count;
1380 
1381 	/* Remove command from watchdog queue. */
1382 	if (sp->flags & SRB_WATCHDOG_ENABLED) {
1383 		ql_remove_link(&tq->wdg, &sp->wdg);
1384 		sp->flags &= ~SRB_WATCHDOG_ENABLED;
1385 	}
1386 
1387 	if (lq->cmd.first != NULL) {
1388 		ql_next(ha, lq);
1389 	} else {
1390 		/* Release LU queue specific lock. */
1391 		DEVICE_QUEUE_UNLOCK(tq);
1392 		if (ha->pha->pending_cmds.first != NULL) {
1393 			ql_start_iocb(ha, NULL);
1394 		}
1395 	}
1396 
1397 	/* Sync buffers if required.  */
1398 	if (sp->flags & SRB_MS_PKT) {
1399 		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1400 		    DDI_DMA_SYNC_FORCPU);
1401 	}
1402 
1403 	/* Map ISP completion codes. */
1404 	sp->pkt->pkt_expln = FC_EXPLN_NONE;
1405 	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1406 	sp->pkt->pkt_state = FC_PKT_SUCCESS;
1407 
1408 	/* Now call the pkt completion callback */
1409 	if (sp->flags & SRB_POLL) {
1410 		sp->flags &= ~SRB_POLL;
1411 	} else if (sp->pkt->pkt_comp) {
1412 		INTR_UNLOCK(ha);
1413 		(*sp->pkt->pkt_comp)(sp->pkt);
1414 		INTR_LOCK(ha);
1415 	}
1416 
1417 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1418 }
1419 
1420 /*
1421  * ql_response_pkt
1422  *	Processes response entry.
1423  *
1424  * Input:
1425  *	ha:		adapter state pointer.
1426  *	done_q:		head pointer to done queue.
1427  *	set_flags:	task daemon flags to set.
1428  *	reset_flags:	task daemon flags to reset.
1429  *	intr_clr:	early interrupt clear
1430  *
1431  * Context:
1432  *	Interrupt or Kernel context, no mailbox commands allowed.
1433  */
1434 static void
1435 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1436     uint32_t *reset_flags, int intr_clr)
1437 {
1438 	response_t	*pkt;
1439 	uint32_t	dma_sync_size_1 = 0;
1440 	uint32_t	dma_sync_size_2 = 0;
1441 	int		status = 0;
1442 
1443 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1444 
1445 	/* Clear RISC interrupt */
1446 	if (intr_clr) {
1447 		CFG_IST(ha, CFG_CTRL_242581) ?
1448 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1449 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1450 	}
1451 
1452 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1453 		EL(ha, "index error = %xh, isp_abort_needed",
1454 		    ha->isp_rsp_index);
1455 		*set_flags |= ISP_ABORT_NEEDED;
1456 		return;
1457 	}
1458 
1459 	if ((ha->flags & ONLINE) == 0) {
1460 		QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1461 		return;
1462 	}
1463 
1464 	/* Calculate size of response queue entries to sync. */
1465 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1466 		dma_sync_size_1 = (uint32_t)
1467 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1468 		    RESPONSE_ENTRY_SIZE);
1469 	} else if (ha->isp_rsp_index == 0) {
1470 		dma_sync_size_1 = (uint32_t)
1471 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1472 		    RESPONSE_ENTRY_SIZE);
1473 	} else {
1474 		/* Responses wrap around the Q */
1475 		dma_sync_size_1 = (uint32_t)
1476 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1477 		    RESPONSE_ENTRY_SIZE);
1478 		dma_sync_size_2 = (uint32_t)
1479 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1480 	}
1481 
1482 	/* Sync DMA buffer. */
1483 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1484 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1485 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1486 	    DDI_DMA_SYNC_FORKERNEL);
1487 	if (dma_sync_size_2) {
1488 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1489 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1490 		    DDI_DMA_SYNC_FORKERNEL);
1491 	}
1492 
1493 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1494 		pkt = ha->response_ring_ptr;
1495 
1496 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1497 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1498 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1499 		    RESPONSE_ENTRY_SIZE);
1500 
1501 		/* Adjust ring index. */
1502 		ha->rsp_ring_index++;
1503 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1504 			ha->rsp_ring_index = 0;
1505 			ha->response_ring_ptr = ha->response_ring_bp;
1506 		} else {
1507 			ha->response_ring_ptr++;
1508 		}
1509 
1510 		/* Process packet. */
1511 		if (ha->status_srb != NULL && pkt->entry_type !=
1512 		    STATUS_CONT_TYPE) {
1513 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1514 			ha->status_srb = NULL;
1515 		}
1516 
1517 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1518 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1519 
1520 		if (pkt->entry_status != 0) {
1521 			ql_error_entry(ha, pkt, done_q, set_flags,
1522 			    reset_flags);
1523 		} else {
1524 			switch (pkt->entry_type) {
1525 			case STATUS_TYPE:
1526 				status |= CFG_IST(ha, CFG_CTRL_242581) ?
1527 				    ql_24xx_status_entry(ha,
1528 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1529 				    reset_flags) :
1530 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1531 				    done_q, set_flags, reset_flags);
1532 				break;
1533 			case STATUS_CONT_TYPE:
1534 				ql_status_cont_entry(ha,
1535 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1536 				    reset_flags);
1537 				break;
1538 			case IP_TYPE:
1539 			case IP_A64_TYPE:
1540 			case IP_CMD_TYPE:
1541 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1542 				    set_flags, reset_flags);
1543 				break;
1544 			case IP_RECEIVE_TYPE:
1545 				ql_ip_rcv_entry(ha,
1546 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1547 				    reset_flags);
1548 				break;
1549 			case IP_RECEIVE_CONT_TYPE:
1550 				ql_ip_rcv_cont_entry(ha,
1551 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1552 				    set_flags, reset_flags);
1553 				break;
1554 			case IP_24XX_RECEIVE_TYPE:
1555 				ql_ip_24xx_rcv_entry(ha,
1556 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1557 				    set_flags, reset_flags);
1558 				break;
1559 			case MS_TYPE:
1560 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1561 				    set_flags, reset_flags);
1562 				break;
1563 			case REPORT_ID_TYPE:
1564 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1565 				    done_q, set_flags, reset_flags);
1566 				break;
1567 			case ELS_PASSTHRU_TYPE:
1568 				ql_els_passthru_entry(ha,
1569 				    (els_passthru_entry_rsp_t *)pkt,
1570 				    done_q, set_flags, reset_flags);
1571 				break;
1572 			case IP_BUF_POOL_TYPE:
1573 			case MARKER_TYPE:
1574 			case VP_MODIFY_TYPE:
1575 			case VP_CONTROL_TYPE:
1576 				break;
1577 			default:
1578 				EL(ha, "Unknown IOCB entry type=%xh\n",
1579 				    pkt->entry_type);
1580 				break;
1581 			}
1582 		}
1583 	}
1584 
1585 	/* Inform RISC of processed responses. */
1586 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1587 
1588 	/* RESET packet received delay for possible async event. */
1589 	if (status & BIT_0) {
1590 		drv_usecwait(500000);
1591 	}
1592 
1593 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1594 }
1595 
1596 /*
1597  * ql_error_entry
1598  *	Processes error entry.
1599  *
1600  * Input:
1601  *	ha = adapter state pointer.
1602  *	pkt = entry pointer.
1603  *	done_q = head pointer to done queue.
1604  *	set_flags = task daemon flags to set.
1605  *	reset_flags = task daemon flags to reset.
1606  *
1607  * Context:
1608  *	Interrupt or Kernel context, no mailbox commands allowed.
1609  */
1610 /* ARGSUSED */
1611 static void
1612 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1613     uint32_t *set_flags, uint32_t *reset_flags)
1614 {
1615 	ql_srb_t	*sp;
1616 	uint32_t	index, resp_identifier;
1617 
1618 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1619 		EL(ha, "Aborted command\n");
1620 		return;
1621 	}
1622 
1623 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1624 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1625 
1626 	if (pkt->entry_status & BIT_6) {
1627 		EL(ha, "Request Queue DMA error\n");
1628 	} else if (pkt->entry_status & BIT_5) {
1629 		EL(ha, "Invalid Entry Order\n");
1630 	} else if (pkt->entry_status & BIT_4) {
1631 		EL(ha, "Invalid Entry Count\n");
1632 	} else if (pkt->entry_status & BIT_3) {
1633 		EL(ha, "Invalid Entry Parameter\n");
1634 	} else if (pkt->entry_status & BIT_2) {
1635 		EL(ha, "Invalid Entry Type\n");
1636 	} else if (pkt->entry_status & BIT_1) {
1637 		EL(ha, "Busy\n");
1638 	} else {
1639 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1640 	}
1641 
1642 	/* Validate the response entry handle. */
1643 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1644 	index = resp_identifier & OSC_INDEX_MASK;
1645 	if (index < MAX_OUTSTANDING_COMMANDS) {
1646 		/* the index seems reasonable */
1647 		sp = ha->outstanding_cmds[index];
1648 		if (sp != NULL) {
1649 			if (sp->handle == resp_identifier) {
1650 				/* Neo, you're the one... */
1651 				ha->outstanding_cmds[index] = NULL;
1652 				sp->handle = 0;
1653 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1654 			} else {
1655 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1656 				    resp_identifier, sp->handle);
1657 				sp = NULL;
1658 				ql_signal_abort(ha, set_flags);
1659 			}
1660 		} else {
1661 			sp = ql_verify_preprocessed_cmd(ha,
1662 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1663 		}
1664 	} else {
1665 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1666 		    index, resp_identifier);
1667 		ql_signal_abort(ha, set_flags);
1668 	}
1669 
1670 	if (sp != NULL) {
1671 		/* Bad payload or header */
1672 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1673 			/* Bad payload or header, set error status. */
1674 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1675 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1676 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1677 		} else {
1678 			/* Set error status. */
1679 			sp->pkt->pkt_reason = CS_UNKNOWN;
1680 		}
1681 
1682 		/* Set completed status. */
1683 		sp->flags |= SRB_ISP_COMPLETED;
1684 
1685 		/* Place command on done queue. */
1686 		ql_add_link_b(done_q, &sp->cmd);
1687 
1688 	}
1689 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1690 }
1691 
1692 /*
1693  * ql_status_entry
1694  *	Processes received ISP2200-2300 status entry.
1695  *
1696  * Input:
1697  *	ha:		adapter state pointer.
1698  *	pkt:		entry pointer.
1699  *	done_q:		done queue pointer.
1700  *	set_flags:	task daemon flags to set.
1701  *	reset_flags:	task daemon flags to reset.
1702  *
1703  * Returns:
1704  *	BIT_0 = CS_RESET status received.
1705  *
1706  * Context:
1707  *	Interrupt or Kernel context, no mailbox commands allowed.
1708  */
1709 /* ARGSUSED */
1710 static int
1711 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1712     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1713 {
1714 	ql_srb_t		*sp;
1715 	uint32_t		index, resp_identifier;
1716 	uint16_t		comp_status;
1717 	int			rval = 0;
1718 
1719 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1720 
1721 	/* Validate the response entry handle. */
1722 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1723 	index = resp_identifier & OSC_INDEX_MASK;
1724 	if (index < MAX_OUTSTANDING_COMMANDS) {
1725 		/* the index seems reasonable */
1726 		sp = ha->outstanding_cmds[index];
1727 		if (sp != NULL) {
1728 			if (sp->handle == resp_identifier) {
1729 				/* Neo, you're the one... */
1730 				ha->outstanding_cmds[index] = NULL;
1731 				sp->handle = 0;
1732 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1733 			} else {
1734 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1735 				    resp_identifier, sp->handle);
1736 				sp = NULL;
1737 				ql_signal_abort(ha, set_flags);
1738 			}
1739 		} else {
1740 			sp = ql_verify_preprocessed_cmd(ha,
1741 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1742 		}
1743 	} else {
1744 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1745 		    index, resp_identifier);
1746 		ql_signal_abort(ha, set_flags);
1747 	}
1748 
1749 	if (sp != NULL) {
1750 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1751 		    &pkt->comp_status);
1752 
1753 		/*
1754 		 * We dont care about SCSI QFULLs.
1755 		 */
1756 		if (comp_status == CS_QUEUE_FULL) {
1757 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1758 			    sp->lun_queue->target_queue->d_id.b24,
1759 			    sp->lun_queue->lun_no);
1760 			comp_status = CS_COMPLETE;
1761 		}
1762 
1763 		/*
1764 		 * 2300 firmware marks completion status as data underrun
1765 		 * for scsi qfulls. Make it transport complete.
1766 		 */
1767 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1768 		    (comp_status == CS_DATA_UNDERRUN) &&
1769 		    (pkt->scsi_status_l != 0)) {
1770 			comp_status = CS_COMPLETE;
1771 		}
1772 
1773 		/*
1774 		 * Workaround T3 issue where we do not get any data xferred
1775 		 * but get back a good status.
1776 		 */
1777 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1778 		    comp_status == CS_COMPLETE &&
1779 		    pkt->scsi_status_l == 0 &&
1780 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1781 		    pkt->residual_length == 0 &&
1782 		    sp->fcp &&
1783 		    sp->fcp->fcp_data_len != 0 &&
1784 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1785 		    SF_DATA_OUT) {
1786 			comp_status = CS_ABORTED;
1787 		}
1788 
1789 		if (sp->flags & SRB_MS_PKT) {
1790 			/*
1791 			 * Ideally it should never be true. But there
1792 			 * is a bug in FW which upon receiving invalid
1793 			 * parameters in MS IOCB returns it as
1794 			 * status entry and not as ms entry type.
1795 			 */
1796 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1797 			    set_flags, reset_flags);
1798 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1799 			    ha->instance);
1800 			return (0);
1801 		}
1802 
1803 		/*
1804 		 * Fast path to good SCSI I/O completion
1805 		 */
1806 		if ((comp_status == CS_COMPLETE) &
1807 		    (!pkt->scsi_status_l) &
1808 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1809 			/* Set completed status. */
1810 			sp->flags |= SRB_ISP_COMPLETED;
1811 			sp->pkt->pkt_reason = comp_status;
1812 			ql_fast_fcp_post(sp);
1813 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1814 			    ha->instance);
1815 			return (0);
1816 		}
1817 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1818 		    reset_flags);
1819 	}
1820 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1821 
1822 	return (rval);
1823 }
1824 
1825 /*
1826  * ql_24xx_status_entry
1827  *	Processes received ISP24xx status entry.
1828  *
1829  * Input:
1830  *	ha:		adapter state pointer.
1831  *	pkt:		entry pointer.
1832  *	done_q:		done queue pointer.
1833  *	set_flags:	task daemon flags to set.
1834  *	reset_flags:	task daemon flags to reset.
1835  *
1836  * Returns:
1837  *	BIT_0 = CS_RESET status received.
1838  *
1839  * Context:
1840  *	Interrupt or Kernel context, no mailbox commands allowed.
1841  */
1842 /* ARGSUSED */
1843 static int
1844 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1845     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1846 {
1847 	ql_srb_t		*sp = NULL;
1848 	uint16_t		comp_status;
1849 	uint32_t		index, resp_identifier;
1850 	int			rval = 0;
1851 
1852 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1853 
1854 	/* Validate the response entry handle. */
1855 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1856 	index = resp_identifier & OSC_INDEX_MASK;
1857 	if (index < MAX_OUTSTANDING_COMMANDS) {
1858 		/* the index seems reasonable */
1859 		sp = ha->outstanding_cmds[index];
1860 		if (sp != NULL) {
1861 			if (sp->handle == resp_identifier) {
1862 				/* Neo, you're the one... */
1863 				ha->outstanding_cmds[index] = NULL;
1864 				sp->handle = 0;
1865 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1866 			} else {
1867 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1868 				    resp_identifier, sp->handle);
1869 				sp = NULL;
1870 				ql_signal_abort(ha, set_flags);
1871 			}
1872 		} else {
1873 			sp = ql_verify_preprocessed_cmd(ha,
1874 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1875 		}
1876 	} else {
1877 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1878 		    index, resp_identifier);
1879 		ql_signal_abort(ha, set_flags);
1880 	}
1881 
1882 	if (sp != NULL) {
1883 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1884 		    &pkt->comp_status);
1885 
1886 		/* We dont care about SCSI QFULLs. */
1887 		if (comp_status == CS_QUEUE_FULL) {
1888 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1889 			    sp->lun_queue->target_queue->d_id.b24,
1890 			    sp->lun_queue->lun_no);
1891 			comp_status = CS_COMPLETE;
1892 		}
1893 
1894 		/*
1895 		 * 2300 firmware marks completion status as data underrun
1896 		 * for scsi qfulls. Make it transport complete.
1897 		 */
1898 		if ((comp_status == CS_DATA_UNDERRUN) &&
1899 		    (pkt->scsi_status_l != 0)) {
1900 			comp_status = CS_COMPLETE;
1901 		}
1902 
1903 		/*
1904 		 * Workaround T3 issue where we do not get any data xferred
1905 		 * but get back a good status.
1906 		 */
1907 		if (comp_status == CS_COMPLETE &&
1908 		    pkt->scsi_status_l == 0 &&
1909 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1910 		    pkt->residual_length != 0 &&
1911 		    sp->fcp &&
1912 		    sp->fcp->fcp_data_len != 0 &&
1913 		    sp->fcp->fcp_cntl.cntl_write_data) {
1914 			comp_status = CS_ABORTED;
1915 		}
1916 
1917 		/*
1918 		 * Fast path to good SCSI I/O completion
1919 		 */
1920 		if ((comp_status == CS_COMPLETE) &
1921 		    (!pkt->scsi_status_l) &
1922 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1923 			/* Set completed status. */
1924 			sp->flags |= SRB_ISP_COMPLETED;
1925 			sp->pkt->pkt_reason = comp_status;
1926 			ql_fast_fcp_post(sp);
1927 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1928 			    ha->instance);
1929 			return (0);
1930 		}
1931 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1932 		    set_flags, reset_flags);
1933 	}
1934 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1935 
1936 	return (rval);
1937 }
1938 
1939 /*
1940  * ql_verify_preprocessed_cmd
1941  *	Handles preprocessed cmds..
1942  *
1943  * Input:
1944  *	ha:		adapter state pointer.
1945  *	pkt_handle:	handle pointer.
1946  *	set_flags:	task daemon flags to set.
1947  *	reset_flags:	task daemon flags to reset.
1948  *
1949  * Returns:
1950  *	srb pointer or NULL
1951  *
1952  * Context:
1953  *	Interrupt or Kernel context, no mailbox commands allowed.
1954  */
1955 /* ARGSUSED */
1956 ql_srb_t *
1957 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
1958     uint32_t *set_flags, uint32_t *reset_flags)
1959 {
1960 	ql_srb_t		*sp = NULL;
1961 	uint32_t		index, resp_identifier;
1962 	uint32_t		get_handle = 10;
1963 
1964 	while (get_handle) {
1965 		/* Get handle. */
1966 		resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
1967 		index = resp_identifier & OSC_INDEX_MASK;
1968 		/* Validate handle. */
1969 		if (index < MAX_OUTSTANDING_COMMANDS) {
1970 			sp = ha->outstanding_cmds[index];
1971 		}
1972 
1973 		if (sp != NULL) {
1974 			EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
1975 			    resp_identifier, get_handle, index);
1976 			break;
1977 		} else {
1978 			get_handle -= 1;
1979 			drv_usecwait(10000);
1980 			if (get_handle == 1) {
1981 				/* Last chance, Sync whole DMA buffer. */
1982 				(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1983 				    RESPONSE_Q_BUFFER_OFFSET,
1984 				    RESPONSE_QUEUE_SIZE,
1985 				    DDI_DMA_SYNC_FORKERNEL);
1986 				EL(ha, "last chance DMA sync, index=%xh\n",
1987 				    index);
1988 			}
1989 		}
1990 	}
1991 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1992 
1993 	return (sp);
1994 }
1995 
1996 
1997 /*
1998  * ql_status_error
1999  *	Processes received ISP status entry error.
2000  *
2001  * Input:
2002  *	ha:		adapter state pointer.
2003  *	sp:		SRB pointer.
2004  *	pkt:		entry pointer.
2005  *	done_q:		done queue pointer.
2006  *	set_flags:	task daemon flags to set.
2007  *	reset_flags:	task daemon flags to reset.
2008  *
2009  * Returns:
2010  *	BIT_0 = CS_RESET status received.
2011  *
2012  * Context:
2013  *	Interrupt or Kernel context, no mailbox commands allowed.
2014  */
2015 /* ARGSUSED */
2016 static int
2017 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
2018     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2019 {
2020 	uint32_t		sense_sz = 0;
2021 	uint32_t		cnt;
2022 	ql_tgt_t		*tq;
2023 	fcp_rsp_t		*fcpr;
2024 	struct fcp_rsp_info	*rsp;
2025 	int			rval = 0;
2026 
2027 	struct {
2028 		uint8_t		*rsp_info;
2029 		uint8_t		*req_sense_data;
2030 		uint32_t	residual_length;
2031 		uint32_t	fcp_residual_length;
2032 		uint32_t	rsp_info_length;
2033 		uint32_t	req_sense_length;
2034 		uint16_t	comp_status;
2035 		uint8_t		state_flags_l;
2036 		uint8_t		state_flags_h;
2037 		uint8_t		scsi_status_l;
2038 		uint8_t		scsi_status_h;
2039 	} sts;
2040 
2041 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2042 
2043 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2044 		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
2045 
2046 		/* Setup status. */
2047 		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2048 		    &pkt24->comp_status);
2049 		sts.scsi_status_l = pkt24->scsi_status_l;
2050 		sts.scsi_status_h = pkt24->scsi_status_h;
2051 
2052 		/* Setup firmware residuals. */
2053 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2054 		    ddi_get32(ha->hba_buf.acc_handle,
2055 		    (uint32_t *)&pkt24->residual_length) : 0;
2056 
2057 		/* Setup FCP residuals. */
2058 		sts.fcp_residual_length = sts.scsi_status_h &
2059 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2060 		    ddi_get32(ha->hba_buf.acc_handle,
2061 		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2062 
2063 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2064 		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
2065 		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2066 
2067 			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2068 			    sts.residual_length,
2069 			    pkt24->fcp_rsp_residual_count);
2070 			sts.scsi_status_h = (uint8_t)
2071 			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
2072 		}
2073 
2074 		/* Setup state flags. */
2075 		sts.state_flags_l = pkt24->state_flags_l;
2076 		sts.state_flags_h = pkt24->state_flags_h;
2077 
2078 		if (sp->fcp->fcp_data_len &&
2079 		    (sts.comp_status != CS_DATA_UNDERRUN ||
2080 		    sts.residual_length != sp->fcp->fcp_data_len)) {
2081 			sts.state_flags_h = (uint8_t)
2082 			    (sts.state_flags_h | SF_GOT_BUS |
2083 			    SF_GOT_TARGET | SF_SENT_CMD |
2084 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2085 		} else {
2086 			sts.state_flags_h = (uint8_t)
2087 			    (sts.state_flags_h | SF_GOT_BUS |
2088 			    SF_GOT_TARGET | SF_SENT_CMD |
2089 			    SF_GOT_STATUS);
2090 		}
2091 		if (sp->fcp->fcp_cntl.cntl_write_data) {
2092 			sts.state_flags_l = (uint8_t)
2093 			    (sts.state_flags_l | SF_DATA_OUT);
2094 		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
2095 			sts.state_flags_l = (uint8_t)
2096 			    (sts.state_flags_l | SF_DATA_IN);
2097 		}
2098 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2099 			sts.state_flags_l = (uint8_t)
2100 			    (sts.state_flags_l | SF_HEAD_OF_Q);
2101 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2102 			sts.state_flags_l = (uint8_t)
2103 			    (sts.state_flags_l | SF_ORDERED_Q);
2104 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2105 			sts.state_flags_l = (uint8_t)
2106 			    (sts.state_flags_l | SF_SIMPLE_Q);
2107 		}
2108 
2109 		/* Setup FCP response info. */
2110 		sts.rsp_info = &pkt24->rsp_sense_data[0];
2111 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2112 			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2113 			    (uint32_t *)&pkt24->fcp_rsp_data_length);
2114 			if (sts.rsp_info_length >
2115 			    sizeof (struct fcp_rsp_info)) {
2116 				sts.rsp_info_length =
2117 				    sizeof (struct fcp_rsp_info);
2118 			}
2119 			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2120 				ql_chg_endian(sts.rsp_info + cnt, 4);
2121 			}
2122 		} else {
2123 			sts.rsp_info_length = 0;
2124 		}
2125 
2126 		/* Setup sense data. */
2127 		sts.req_sense_data =
2128 		    &pkt24->rsp_sense_data[sts.rsp_info_length];
2129 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2130 			sts.req_sense_length =
2131 			    ddi_get32(ha->hba_buf.acc_handle,
2132 			    (uint32_t *)&pkt24->fcp_sense_length);
2133 			sts.state_flags_h = (uint8_t)
2134 			    (sts.state_flags_h | SF_ARQ_DONE);
2135 			sense_sz = (uint32_t)
2136 			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2137 			    (uintptr_t)sts.req_sense_data);
2138 			for (cnt = 0; cnt < sense_sz; cnt += 4) {
2139 				ql_chg_endian(sts.req_sense_data + cnt, 4);
2140 			}
2141 		} else {
2142 			sts.req_sense_length = 0;
2143 		}
2144 	} else {
2145 		/* Setup status. */
2146 		sts.comp_status = (uint16_t)ddi_get16(
2147 		    ha->hba_buf.acc_handle, &pkt23->comp_status);
2148 		sts.scsi_status_l = pkt23->scsi_status_l;
2149 		sts.scsi_status_h = pkt23->scsi_status_h;
2150 
2151 		/* Setup firmware residuals. */
2152 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2153 		    ddi_get32(ha->hba_buf.acc_handle,
2154 		    (uint32_t *)&pkt23->residual_length) : 0;
2155 
2156 		/* Setup FCP residuals. */
2157 		sts.fcp_residual_length = sts.scsi_status_h &
2158 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2159 		    sts.residual_length : 0;
2160 
2161 		/* Setup state flags. */
2162 		sts.state_flags_l = pkt23->state_flags_l;
2163 		sts.state_flags_h = pkt23->state_flags_h;
2164 
2165 		/* Setup FCP response info. */
2166 		sts.rsp_info = &pkt23->rsp_info[0];
2167 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2168 			sts.rsp_info_length = ddi_get16(
2169 			    ha->hba_buf.acc_handle,
2170 			    (uint16_t *)&pkt23->rsp_info_length);
2171 			if (sts.rsp_info_length >
2172 			    sizeof (struct fcp_rsp_info)) {
2173 				sts.rsp_info_length =
2174 				    sizeof (struct fcp_rsp_info);
2175 			}
2176 		} else {
2177 			sts.rsp_info_length = 0;
2178 		}
2179 
2180 		/* Setup sense data. */
2181 		sts.req_sense_data = &pkt23->req_sense_data[0];
2182 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2183 		    ddi_get16(ha->hba_buf.acc_handle,
2184 		    (uint16_t *)&pkt23->req_sense_length) : 0;
2185 	}
2186 
2187 	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2188 
2189 	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2190 	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2191 	    sizeof (fcp_rsp_t));
2192 
2193 	tq = sp->lun_queue->target_queue;
2194 
2195 	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2196 	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2197 		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2198 	}
2199 	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2200 		fcpr->fcp_u.fcp_status.sense_len_set = 1;
2201 	}
2202 	if (sts.scsi_status_h & FCP_RESID_OVER) {
2203 		fcpr->fcp_u.fcp_status.resid_over = 1;
2204 	}
2205 	if (sts.scsi_status_h & FCP_RESID_UNDER) {
2206 		fcpr->fcp_u.fcp_status.resid_under = 1;
2207 	}
2208 	fcpr->fcp_u.fcp_status.reserved_1 = 0;
2209 
2210 	/* Set ISP completion status */
2211 	sp->pkt->pkt_reason = sts.comp_status;
2212 
2213 	/* Update statistics. */
2214 	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2215 	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2216 
2217 		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2218 		if (sense_sz > sts.rsp_info_length) {
2219 			sense_sz = sts.rsp_info_length;
2220 		}
2221 
2222 		/* copy response information data. */
2223 		if (sense_sz) {
2224 			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2225 			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2226 		}
2227 		fcpr->fcp_response_len = sense_sz;
2228 
2229 		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2230 		    fcpr->fcp_response_len);
2231 
2232 		switch (*(sts.rsp_info + 3)) {
2233 		case FCP_NO_FAILURE:
2234 			break;
2235 		case FCP_DL_LEN_MISMATCH:
2236 			ha->adapter_stats->d_stats[lobyte(
2237 			    tq->loop_id)].dl_len_mismatches++;
2238 			break;
2239 		case FCP_CMND_INVALID:
2240 			break;
2241 		case FCP_DATA_RO_MISMATCH:
2242 			ha->adapter_stats->d_stats[lobyte(
2243 			    tq->loop_id)].data_ro_mismatches++;
2244 			break;
2245 		case FCP_TASK_MGMT_NOT_SUPPTD:
2246 			break;
2247 		case FCP_TASK_MGMT_FAILED:
2248 			ha->adapter_stats->d_stats[lobyte(
2249 			    tq->loop_id)].task_mgmt_failures++;
2250 			break;
2251 		default:
2252 			break;
2253 		}
2254 	} else {
2255 		/*
2256 		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2257 		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
2258 		 */
2259 		fcpr->fcp_response_len = 0;
2260 	}
2261 
2262 	/* Set reset status received. */
2263 	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2264 		rval |= BIT_0;
2265 	}
2266 
2267 	if (!(tq->flags & TQF_TAPE_DEVICE) &&
2268 	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2269 	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2270 	    ha->task_daemon_flags & LOOP_DOWN) {
2271 		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2272 		    tq->d_id.b24, sp->lun_queue->lun_no);
2273 
2274 		/* Set retry status. */
2275 		sp->flags |= SRB_RETRY;
2276 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2277 	    tq->port_down_retry_count != 0 &&
2278 	    (sts.comp_status == CS_INCOMPLETE ||
2279 	    sts.comp_status == CS_PORT_UNAVAILABLE ||
2280 	    sts.comp_status == CS_PORT_LOGGED_OUT ||
2281 	    sts.comp_status == CS_PORT_CONFIG_CHG ||
2282 	    sts.comp_status == CS_PORT_BUSY)) {
2283 		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2284 		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2285 		    tq->port_down_retry_count);
2286 
2287 		/* Set retry status. */
2288 		sp->flags |= SRB_RETRY;
2289 
2290 		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2291 			/* Acquire device queue lock. */
2292 			DEVICE_QUEUE_LOCK(tq);
2293 
2294 			tq->flags |= TQF_QUEUE_SUSPENDED;
2295 
2296 			/* Decrement port down count. */
2297 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2298 				tq->port_down_retry_count--;
2299 			}
2300 
2301 			DEVICE_QUEUE_UNLOCK(tq);
2302 
2303 			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2304 			    == 0 &&
2305 			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2306 			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2307 				sp->ha->adapter_stats->d_stats[lobyte(
2308 				    tq->loop_id)].logouts_recvd++;
2309 				ql_send_logo(sp->ha, tq, done_q);
2310 			}
2311 
2312 			ADAPTER_STATE_LOCK(ha);
2313 			if (ha->port_retry_timer == 0) {
2314 				if ((ha->port_retry_timer =
2315 				    ha->port_down_retry_delay) == 0) {
2316 					*set_flags |=
2317 					    PORT_RETRY_NEEDED;
2318 				}
2319 			}
2320 			ADAPTER_STATE_UNLOCK(ha);
2321 		}
2322 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2323 	    (sts.comp_status == CS_RESET ||
2324 	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2325 	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2326 		if (sts.comp_status == CS_RESET) {
2327 			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2328 			    tq->d_id.b24, sp->lun_queue->lun_no);
2329 		} else if (sts.comp_status == CS_QUEUE_FULL) {
2330 			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2331 			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2332 			    tq->qfull_retry_count);
2333 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2334 				tq->flags |= TQF_QUEUE_SUSPENDED;
2335 
2336 				tq->qfull_retry_count--;
2337 
2338 				ADAPTER_STATE_LOCK(ha);
2339 				if (ha->port_retry_timer == 0) {
2340 					if ((ha->port_retry_timer =
2341 					    ha->qfull_retry_delay) ==
2342 					    0) {
2343 						*set_flags |=
2344 						    PORT_RETRY_NEEDED;
2345 					}
2346 				}
2347 				ADAPTER_STATE_UNLOCK(ha);
2348 			}
2349 		} else {
2350 			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2351 			    tq->d_id.b24, sp->lun_queue->lun_no);
2352 		}
2353 
2354 		/* Set retry status. */
2355 		sp->flags |= SRB_RETRY;
2356 	} else {
2357 		fcpr->fcp_resid =
2358 		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2359 		    sp->fcp->fcp_data_len : sts.fcp_residual_length;
2360 
2361 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2362 		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2363 
2364 			if (sts.scsi_status_l == STATUS_CHECK) {
2365 				sp->pkt->pkt_reason = CS_COMPLETE;
2366 			} else {
2367 				EL(ha, "transport error - "
2368 				    "underrun & invalid resid\n");
2369 				EL(ha, "ssh=%xh, ssl=%xh\n",
2370 				    sts.scsi_status_h, sts.scsi_status_l);
2371 				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2372 			}
2373 		}
2374 
2375 		/* Ignore firmware underrun error. */
2376 		if (sts.comp_status == CS_DATA_UNDERRUN &&
2377 		    (sts.scsi_status_h & FCP_RESID_UNDER ||
2378 		    (sts.scsi_status_l != STATUS_CHECK &&
2379 		    sts.scsi_status_l != STATUS_GOOD))) {
2380 			sp->pkt->pkt_reason = CS_COMPLETE;
2381 		}
2382 
2383 		if (sp->pkt->pkt_reason != CS_COMPLETE) {
2384 			ha->xioctl->DeviceErrorCount++;
2385 			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2386 			    "\n", sts.comp_status, tq->d_id.b24,
2387 			    sp->lun_queue->lun_no);
2388 		}
2389 
2390 		/* Set target request sense data. */
2391 		if (sts.scsi_status_l == STATUS_CHECK) {
2392 			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2393 
2394 				if (sp->pkt->pkt_reason == CS_COMPLETE &&
2395 				    sts.req_sense_data[2] != KEY_NO_SENSE &&
2396 				    sts.req_sense_data[2] !=
2397 				    KEY_UNIT_ATTENTION) {
2398 					ha->xioctl->DeviceErrorCount++;
2399 				}
2400 
2401 				sense_sz = sts.req_sense_length;
2402 
2403 				/* Insure data does not exceed buf. */
2404 				if (sp->pkt->pkt_rsplen <=
2405 				    (uint32_t)sizeof (fcp_rsp_t) +
2406 				    fcpr->fcp_response_len) {
2407 					sp->request_sense_length = 0;
2408 				} else {
2409 					sp->request_sense_length = (uint32_t)
2410 					    (sp->pkt->pkt_rsplen -
2411 					    sizeof (fcp_rsp_t) -
2412 					    fcpr->fcp_response_len);
2413 				}
2414 
2415 				if (sense_sz <
2416 				    sp->request_sense_length) {
2417 					sp->request_sense_length =
2418 					    sense_sz;
2419 				}
2420 
2421 				sp->request_sense_ptr = (caddr_t)rsp;
2422 
2423 				sense_sz = (uint32_t)
2424 				    (((uintptr_t)pkt23 +
2425 				    sizeof (sts_entry_t)) -
2426 				    (uintptr_t)sts.req_sense_data);
2427 				if (sp->request_sense_length <
2428 				    sense_sz) {
2429 					sense_sz =
2430 					    sp->request_sense_length;
2431 				}
2432 
2433 				fcpr->fcp_sense_len = sense_sz;
2434 
2435 				/* Move sense data. */
2436 				ddi_rep_get8(ha->hba_buf.acc_handle,
2437 				    (uint8_t *)sp->request_sense_ptr,
2438 				    sts.req_sense_data,
2439 				    (size_t)sense_sz,
2440 				    DDI_DEV_AUTOINCR);
2441 
2442 				sp->request_sense_ptr += sense_sz;
2443 				sp->request_sense_length -= sense_sz;
2444 				if (sp->request_sense_length != 0) {
2445 					ha->status_srb = sp;
2446 				}
2447 			}
2448 
2449 			if (sense_sz != 0) {
2450 				EL(sp->ha, "check condition sense data, "
2451 				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2452 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2453 				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2454 				    sp->lun_queue->lun_no,
2455 				    sts.req_sense_data[0],
2456 				    sts.req_sense_data[1],
2457 				    sts.req_sense_data[2],
2458 				    sts.req_sense_data[3],
2459 				    sts.req_sense_data[4],
2460 				    sts.req_sense_data[5],
2461 				    sts.req_sense_data[6],
2462 				    sts.req_sense_data[7],
2463 				    sts.req_sense_data[8],
2464 				    sts.req_sense_data[9],
2465 				    sts.req_sense_data[10],
2466 				    sts.req_sense_data[11],
2467 				    sts.req_sense_data[12],
2468 				    sts.req_sense_data[13],
2469 				    sts.req_sense_data[14],
2470 				    sts.req_sense_data[15],
2471 				    sts.req_sense_data[16],
2472 				    sts.req_sense_data[17]);
2473 			} else {
2474 				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2475 				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2476 			}
2477 		}
2478 	}
2479 
2480 	/* Set completed status. */
2481 	sp->flags |= SRB_ISP_COMPLETED;
2482 
2483 	/* Place command on done queue. */
2484 	if (ha->status_srb == NULL) {
2485 		ql_add_link_b(done_q, &sp->cmd);
2486 	}
2487 
2488 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2489 
2490 	return (rval);
2491 }
2492 
2493 /*
2494  * ql_status_cont_entry
2495  *	Processes status continuation entry.
2496  *
2497  * Input:
2498  *	ha:		adapter state pointer.
2499  *	pkt:		entry pointer.
2500  *	done_q:		done queue pointer.
2501  *	set_flags:	task daemon flags to set.
2502  *	reset_flags:	task daemon flags to reset.
2503  *
2504  * Context:
2505  *	Interrupt or Kernel context, no mailbox commands allowed.
2506  */
2507 /* ARGSUSED */
2508 static void
2509 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2510     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2511 {
2512 	uint32_t	sense_sz, index;
2513 	ql_srb_t	*sp = ha->status_srb;
2514 
2515 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2516 
2517 	if (sp != NULL && sp->request_sense_length) {
2518 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2519 			sense_sz = sizeof (pkt->req_sense_data);
2520 		} else {
2521 			sense_sz = sp->request_sense_length;
2522 		}
2523 
2524 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2525 			for (index = 0; index < sense_sz; index += 4) {
2526 				ql_chg_endian((uint8_t *)
2527 				    &pkt->req_sense_data[0] + index, 4);
2528 			}
2529 		}
2530 
2531 		/* Move sense data. */
2532 		ddi_rep_get8(ha->hba_buf.acc_handle,
2533 		    (uint8_t *)sp->request_sense_ptr,
2534 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2535 		    DDI_DEV_AUTOINCR);
2536 
2537 		sp->request_sense_ptr += sense_sz;
2538 		sp->request_sense_length -= sense_sz;
2539 
2540 		/* Place command on done queue. */
2541 		if (sp->request_sense_length == 0) {
2542 			ql_add_link_b(done_q, &sp->cmd);
2543 			ha->status_srb = NULL;
2544 		}
2545 	}
2546 
2547 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2548 }
2549 
2550 /*
2551  * ql_ip_entry
2552  *	Processes received ISP IP entry.
2553  *
2554  * Input:
2555  *	ha:		adapter state pointer.
2556  *	pkt:		entry pointer.
2557  *	done_q:		done queue pointer.
2558  *	set_flags:	task daemon flags to set.
2559  *	reset_flags:	task daemon flags to reset.
2560  *
2561  * Context:
2562  *	Interrupt or Kernel context, no mailbox commands allowed.
2563  */
2564 /* ARGSUSED */
2565 static void
2566 ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
2567     uint32_t *set_flags, uint32_t *reset_flags)
2568 {
2569 	ql_srb_t	*sp;
2570 	uint32_t	index, resp_identifier;
2571 	ql_tgt_t	*tq;
2572 
2573 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2574 
2575 	/* Validate the response entry handle. */
2576 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2577 	index = resp_identifier & OSC_INDEX_MASK;
2578 	if (index < MAX_OUTSTANDING_COMMANDS) {
2579 		/* the index seems reasonable */
2580 		sp = ha->outstanding_cmds[index];
2581 		if (sp != NULL) {
2582 			if (sp->handle == resp_identifier) {
2583 				/* Neo, you're the one... */
2584 				ha->outstanding_cmds[index] = NULL;
2585 				sp->handle = 0;
2586 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2587 			} else {
2588 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2589 				    resp_identifier, sp->handle);
2590 				sp = NULL;
2591 				ql_signal_abort(ha, set_flags);
2592 			}
2593 		} else {
2594 			sp = ql_verify_preprocessed_cmd(ha,
2595 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2596 		}
2597 	} else {
2598 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2599 		    index, resp_identifier);
2600 		ql_signal_abort(ha, set_flags);
2601 	}
2602 
2603 	if (sp != NULL) {
2604 		tq = sp->lun_queue->target_queue;
2605 
2606 		/* Set ISP completion status */
2607 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2608 			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;
2609 
2610 			sp->pkt->pkt_reason = ddi_get16(
2611 			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
2612 		} else {
2613 			sp->pkt->pkt_reason = ddi_get16(
2614 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2615 		}
2616 
2617 		if (ha->task_daemon_flags & LOOP_DOWN) {
2618 			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2619 			    tq->d_id.b24);
2620 
2621 			/* Set retry status. */
2622 			sp->flags |= SRB_RETRY;
2623 
2624 		} else if (tq->port_down_retry_count &&
2625 		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2626 		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2627 		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2628 		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2629 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2630 			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2631 			    sp->pkt->pkt_reason, tq->d_id.b24,
2632 			    tq->port_down_retry_count);
2633 
2634 			/* Set retry status. */
2635 			sp->flags |= SRB_RETRY;
2636 
2637 			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2638 			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
2639 				ha->adapter_stats->d_stats[lobyte(
2640 				    tq->loop_id)].logouts_recvd++;
2641 				ql_send_logo(ha, tq, done_q);
2642 			}
2643 
2644 			/* Acquire device queue lock. */
2645 			DEVICE_QUEUE_LOCK(tq);
2646 
2647 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2648 				tq->flags |= TQF_QUEUE_SUSPENDED;
2649 
2650 				tq->port_down_retry_count--;
2651 
2652 				ADAPTER_STATE_LOCK(ha);
2653 				if (ha->port_retry_timer == 0) {
2654 					if ((ha->port_retry_timer =
2655 					    ha->port_down_retry_delay) == 0) {
2656 						*set_flags |=
2657 						    PORT_RETRY_NEEDED;
2658 					}
2659 				}
2660 				ADAPTER_STATE_UNLOCK(ha);
2661 			}
2662 
2663 			/* Release device queue specific lock. */
2664 			DEVICE_QUEUE_UNLOCK(tq);
2665 
2666 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2667 			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2668 
2669 			/* Set retry status. */
2670 			sp->flags |= SRB_RETRY;
2671 		} else {
2672 			if (sp->pkt->pkt_reason != CS_COMPLETE) {
2673 				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2674 				    sp->pkt->pkt_reason, tq->d_id.b24);
2675 			}
2676 		}
2677 
2678 		/* Set completed status. */
2679 		sp->flags |= SRB_ISP_COMPLETED;
2680 
2681 		ql_add_link_b(done_q, &sp->cmd);
2682 
2683 	}
2684 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2685 }
2686 
2687 /*
2688  * ql_ip_rcv_entry
2689  *	Processes received ISP IP buffers entry.
2690  *
2691  * Input:
2692  *	ha:		adapter state pointer.
2693  *	pkt:		entry pointer.
2694  *	done_q:		done queue pointer.
2695  *	set_flags:	task daemon flags to set.
2696  *	reset_flags:	task daemon flags to reset.
2697  *
2698  * Context:
2699  *	Interrupt or Kernel context, no mailbox commands allowed.
2700  */
2701 /* ARGSUSED */
2702 static void
2703 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2704     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2705 {
2706 	port_id_t	s_id;
2707 	uint16_t	index;
2708 	uint8_t		cnt;
2709 	ql_tgt_t	*tq;
2710 
2711 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2712 
2713 	/* Locate device queue. */
2714 	s_id.b.al_pa = pkt->s_id[0];
2715 	s_id.b.area = pkt->s_id[1];
2716 	s_id.b.domain = pkt->s_id[2];
2717 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2718 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2719 		return;
2720 	}
2721 
2722 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2723 	    &pkt->seq_length);
2724 	tq->ub_total_seg_cnt = pkt->segment_count;
2725 	tq->ub_seq_id = ++ha->ub_seq_id;
2726 	tq->ub_seq_cnt = 0;
2727 	tq->ub_frame_ro = 0;
2728 	tq->ub_loop_id = pkt->loop_id;
2729 	ha->rcv_dev_q = tq;
2730 
2731 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2732 	    tq->ub_total_seg_cnt; cnt++) {
2733 
2734 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2735 		    &pkt->buffer_handle[cnt]);
2736 
2737 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2738 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2739 			*set_flags |= ISP_ABORT_NEEDED;
2740 			break;
2741 		}
2742 	}
2743 
2744 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2745 }
2746 
2747 /*
2748  * ql_ip_rcv_cont_entry
2749  *	Processes received ISP IP buffers continuation entry.
2750  *
2751  * Input:
2752  *	ha:		adapter state pointer.
2753  *	pkt:		entry pointer.
2754  *	done_q:		done queue pointer.
2755  *	set_flags:	task daemon flags to set.
2756  *	reset_flags:	task daemon flags to reset.
2757  *
2758  * Context:
2759  *	Interrupt or Kernel context, no mailbox commands allowed.
2760  */
2761 /* ARGSUSED */
2762 static void
2763 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2764     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2765 {
2766 	uint16_t	index;
2767 	uint8_t		cnt;
2768 	ql_tgt_t	*tq;
2769 
2770 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2771 
2772 	if ((tq = ha->rcv_dev_q) == NULL) {
2773 		EL(ha, "No IP receive device\n");
2774 		return;
2775 	}
2776 
2777 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2778 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2779 
2780 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2781 		    &pkt->buffer_handle[cnt]);
2782 
2783 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2784 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2785 			*set_flags |= ISP_ABORT_NEEDED;
2786 			break;
2787 		}
2788 	}
2789 
2790 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2791 }
2792 
2793 /*
2794  * ip_rcv_24xx_entry_t
2795  *	Processes received ISP24xx IP buffers entry.
2796  *
2797  * Input:
2798  *	ha:		adapter state pointer.
2799  *	pkt:		entry pointer.
2800  *	done_q:		done queue pointer.
2801  *	set_flags:	task daemon flags to set.
2802  *	reset_flags:	task daemon flags to reset.
2803  *
2804  * Context:
2805  *	Interrupt or Kernel context, no mailbox commands allowed.
2806  */
2807 /* ARGSUSED */
2808 static void
2809 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2810     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2811 {
2812 	port_id_t	s_id;
2813 	uint16_t	index;
2814 	uint8_t		cnt;
2815 	ql_tgt_t	*tq;
2816 
2817 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2818 
2819 	/* Locate device queue. */
2820 	s_id.b.al_pa = pkt->s_id[0];
2821 	s_id.b.area = pkt->s_id[1];
2822 	s_id.b.domain = pkt->s_id[2];
2823 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2824 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2825 		return;
2826 	}
2827 
2828 	if (tq->ub_total_seg_cnt == 0) {
2829 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2830 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2831 		tq->ub_total_seg_cnt = pkt->segment_count;
2832 		tq->ub_seq_id = ++ha->ub_seq_id;
2833 		tq->ub_seq_cnt = 0;
2834 		tq->ub_frame_ro = 0;
2835 		tq->ub_loop_id = (uint16_t)ddi_get16(
2836 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2837 	}
2838 
2839 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2840 	    tq->ub_total_seg_cnt; cnt++) {
2841 
2842 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2843 		    &pkt->buffer_handle[cnt]);
2844 
2845 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2846 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2847 			*set_flags |= ISP_ABORT_NEEDED;
2848 			break;
2849 		}
2850 	}
2851 
2852 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2853 }
2854 
2855 /*
2856  * ql_ms_entry
2857  *	Processes received Name/Management/CT Pass-Through entry.
2858  *
2859  * Input:
2860  *	ha:		adapter state pointer.
2861  *	pkt23:		entry pointer.
2862  *	done_q:		done queue pointer.
2863  *	set_flags:	task daemon flags to set.
2864  *	reset_flags:	task daemon flags to reset.
2865  *
2866  * Context:
2867  *	Interrupt or Kernel context, no mailbox commands allowed.
2868  */
2869 /* ARGSUSED */
2870 static void
2871 ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
2872     uint32_t *set_flags, uint32_t *reset_flags)
2873 {
2874 	ql_srb_t		*sp;
2875 	uint32_t		index, cnt, resp_identifier;
2876 	ql_tgt_t		*tq;
2877 	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;
2878 
2879 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2880 
2881 	/* Validate the response entry handle. */
2882 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2883 	index = resp_identifier & OSC_INDEX_MASK;
2884 	if (index < MAX_OUTSTANDING_COMMANDS) {
2885 		/* the index seems reasonable */
2886 		sp = ha->outstanding_cmds[index];
2887 		if (sp != NULL) {
2888 			if (sp->handle == resp_identifier) {
2889 				/* Neo, you're the one... */
2890 				ha->outstanding_cmds[index] = NULL;
2891 				sp->handle = 0;
2892 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2893 			} else {
2894 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2895 				    resp_identifier, sp->handle);
2896 				sp = NULL;
2897 				ql_signal_abort(ha, set_flags);
2898 			}
2899 		} else {
2900 			sp = ql_verify_preprocessed_cmd(ha,
2901 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2902 		}
2903 	} else {
2904 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2905 		    index, resp_identifier);
2906 		ql_signal_abort(ha, set_flags);
2907 	}
2908 
2909 	if (sp != NULL) {
2910 		if (!(sp->flags & SRB_MS_PKT)) {
2911 			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
2912 			    sp->flags);
2913 			*set_flags |= ISP_ABORT_NEEDED;
2914 			return;
2915 		}
2916 
2917 		tq = sp->lun_queue->target_queue;
2918 
2919 		/* Set ISP completion status */
2920 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2921 			sp->pkt->pkt_reason = ddi_get16(
2922 			    ha->hba_buf.acc_handle, &pkt24->status);
2923 		} else {
2924 			sp->pkt->pkt_reason = ddi_get16(
2925 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2926 		}
2927 
2928 		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
2929 		    sp->retry_count) {
2930 			EL(ha, "Resouce Unavailable Retry = %d\n",
2931 			    sp->retry_count);
2932 
2933 			/* Set retry status. */
2934 			sp->retry_count--;
2935 			sp->flags |= SRB_RETRY;
2936 
2937 			/* Acquire device queue lock. */
2938 			DEVICE_QUEUE_LOCK(tq);
2939 
2940 			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
2941 				tq->flags |= TQF_QUEUE_SUSPENDED;
2942 
2943 				ADAPTER_STATE_LOCK(ha);
2944 				if (ha->port_retry_timer == 0) {
2945 					ha->port_retry_timer = 2;
2946 				}
2947 				ADAPTER_STATE_UNLOCK(ha);
2948 			}
2949 
2950 			/* Release device queue specific lock. */
2951 			DEVICE_QUEUE_UNLOCK(tq);
2952 
2953 		} else if (tq->port_down_retry_count &&
2954 		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2955 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2956 			EL(ha, "Port Down Retry\n");
2957 
2958 			/* Set retry status. */
2959 			sp->flags |= SRB_RETRY;
2960 
2961 			/* Acquire device queue lock. */
2962 			DEVICE_QUEUE_LOCK(tq);
2963 
2964 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2965 				tq->flags |= TQF_QUEUE_SUSPENDED;
2966 
2967 				tq->port_down_retry_count--;
2968 
2969 				ADAPTER_STATE_LOCK(ha);
2970 				if (ha->port_retry_timer == 0) {
2971 					if ((ha->port_retry_timer =
2972 					    ha->port_down_retry_delay) == 0) {
2973 						*set_flags |=
2974 						    PORT_RETRY_NEEDED;
2975 					}
2976 				}
2977 				ADAPTER_STATE_UNLOCK(ha);
2978 			}
2979 			/* Release device queue specific lock. */
2980 			DEVICE_QUEUE_UNLOCK(tq);
2981 
2982 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2983 			EL(ha, "Reset Retry\n");
2984 
2985 			/* Set retry status. */
2986 			sp->flags |= SRB_RETRY;
2987 
2988 		} else if (CFG_IST(ha, CFG_CTRL_242581) &&
2989 		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
2990 			cnt = ddi_get32(ha->hba_buf.acc_handle,
2991 			    &pkt24->resp_byte_count);
2992 			if (cnt < sizeof (fc_ct_header_t)) {
2993 				EL(ha, "Data underrun\n");
2994 			} else {
2995 				sp->pkt->pkt_reason = CS_COMPLETE;
2996 			}
2997 
2998 		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
2999 			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
3000 		}
3001 
3002 		if (sp->pkt->pkt_reason == CS_COMPLETE) {
3003 			/*EMPTY*/
3004 			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
3005 			    ha->instance, sp->pkt->pkt_cmd[8],
3006 			    sp->pkt->pkt_cmd[9]);
3007 			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3008 		}
3009 
3010 		/* For nameserver restore command, management change header. */
3011 		if ((sp->flags & SRB_RETRY) == 0) {
3012 			tq->d_id.b24 == 0xfffffc ?
3013 			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3014 			    sp->pkt->pkt_cmd, B_TRUE) :
3015 			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3016 			    sp->pkt->pkt_resp, B_TRUE);
3017 		}
3018 
3019 		/* Set completed status. */
3020 		sp->flags |= SRB_ISP_COMPLETED;
3021 
3022 		/* Place command on done queue. */
3023 		ql_add_link_b(done_q, &sp->cmd);
3024 
3025 	}
3026 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3027 }
3028 
3029 /*
3030  * ql_report_id_entry
3031  *	Processes received Name/Management/CT Pass-Through entry.
3032  *
3033  * Input:
3034  *	ha:		adapter state pointer.
3035  *	pkt23:		entry pointer.
3036  *	done_q:		done queue pointer.
3037  *	set_flags:	task daemon flags to set.
3038  *	reset_flags:	task daemon flags to reset.
3039  *
3040  * Context:
3041  *	Interrupt or Kernel context, no mailbox commands allowed.
3042  */
3043 /* ARGSUSED */
3044 static void
3045 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3046     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3047 {
3048 	ql_adapter_state_t	*vha;
3049 
3050 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3051 
3052 	EL(ha, "format=%d, vp=%d, status=%d\n",
3053 	    pkt->format, pkt->vp_index, pkt->status);
3054 
3055 	if (pkt->format == 1) {
3056 		/* Locate port state structure. */
3057 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3058 			if (vha->vp_index == pkt->vp_index) {
3059 				break;
3060 			}
3061 		}
3062 		if (vha != NULL && vha->vp_index != 0 &&
3063 		    (pkt->status == CS_COMPLETE ||
3064 		    pkt->status == CS_PORT_ID_CHANGE)) {
3065 			*set_flags |= LOOP_RESYNC_NEEDED;
3066 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3067 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3068 			TASK_DAEMON_LOCK(ha);
3069 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3070 			vha->task_daemon_flags &= ~LOOP_DOWN;
3071 			TASK_DAEMON_UNLOCK(ha);
3072 		}
3073 	}
3074 
3075 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3076 }
3077 
3078 /*
3079  * ql_els_entry
3080  *	Processes received ELS Pass-Through entry.
3081  *
3082  * Input:
3083  *	ha:		adapter state pointer.
3084  *	pkt23:		entry pointer.
3085  *	done_q:		done queue pointer.
3086  *	set_flags:	task daemon flags to set.
3087  *	reset_flags:	task daemon flags to reset.
3088  *
3089  * Context:
3090  *	Interrupt or Kernel context, no mailbox commands allowed.
3091  */
3092 /* ARGSUSED */
3093 static void
3094 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3095     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3096 {
3097 	ql_tgt_t	*tq;
3098 	port_id_t	d_id, s_id;
3099 	ql_srb_t	*srb;
3100 	uint32_t	index, resp_identifier;
3101 
3102 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3103 
3104 	/* Validate the response entry handle. */
3105 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3106 	index = resp_identifier & OSC_INDEX_MASK;
3107 	if (index < MAX_OUTSTANDING_COMMANDS) {
3108 		/* the index seems reasonable */
3109 		srb = ha->outstanding_cmds[index];
3110 		if (srb != NULL) {
3111 			if (srb->handle == resp_identifier) {
3112 				/* Neo, you're the one... */
3113 				ha->outstanding_cmds[index] = NULL;
3114 				srb->handle = 0;
3115 				srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3116 			} else {
3117 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3118 				    resp_identifier, srb->handle);
3119 				srb = NULL;
3120 				ql_signal_abort(ha, set_flags);
3121 			}
3122 		} else {
3123 			srb = ql_verify_preprocessed_cmd(ha,
3124 			    (uint32_t *)&rsp->handle, set_flags, reset_flags);
3125 		}
3126 	} else {
3127 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3128 		    index, resp_identifier);
3129 		ql_signal_abort(ha, set_flags);
3130 	}
3131 
3132 	if (srb != NULL) {
3133 		if (!(srb->flags & SRB_ELS_PKT)) {
3134 			EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3135 			    srb->flags);
3136 			*set_flags |= ISP_ABORT_NEEDED;
3137 			return;
3138 		}
3139 
3140 		(void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3141 		    DDI_DMA_SYNC_FORKERNEL);
3142 
3143 		/* Set ISP completion status */
3144