1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77     uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79     ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83     ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 
91 /*
92  * ql_isr
93  *	Process all INTX intr types.
94  *
95  * Input:
96  *	arg1:	adapter state pointer.
97  *
98  * Returns:
99  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
100  *
101  * Context:
102  *	Interrupt or Kernel context, no mailbox commands allowed.
103  */
104 /* ARGSUSED */
105 uint_t
106 ql_isr(caddr_t arg1)
107 {
108 	return (ql_isr_aif(arg1, 0));
109 }
110 
111 /*
112  * ql_isr_default
113  *	Process unknown/unvectored intr types
114  *
115  * Input:
116  *	arg1:	adapter state pointer.
117  *	arg2:	interrupt vector.
118  *
119  * Returns:
120  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
121  *
122  * Context:
123  *	Interrupt or Kernel context, no mailbox commands allowed.
124  */
/* ARGSUSED */
uint_t
ql_isr_default(caddr_t arg1, caddr_t arg2)
{
	ql_adapter_state_t	*ha = (void *)arg1;

	/*
	 * NOTE(review): arg2 is a pointer-sized vector cookie printed with
	 * %x; this relies on EL()'s varargs handling on LP64 — confirm.
	 */
	EL(ha, "isr_default called: idx=%x\n", arg2);
	/* Treat the unvectored interrupt exactly like a normal AIF. */
	return (ql_isr_aif(arg1, arg2));
}
134 
135 /*
136  * ql_isr_aif
137  *	Process mailbox and I/O command completions.
138  *
139  * Input:
140  *	arg:	adapter state pointer.
141  *	intvec:	interrupt vector.
142  *
143  * Returns:
144  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
145  *
146  * Context:
147  *	Interrupt or Kernel context, no mailbox commands allowed.
148  */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;
	uint32_t		reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	/* For the first few loop passes the RISC interrupt may be cleared
	 * early by the completion handlers (intr_clr argument). */
	int			intr_loop = 4;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Refuse interrupts while powered down; otherwise hold a busy
	 * reference so power management won't lower power mid-ISR. */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	/* ISP2200 uses the legacy istatus/semaphore handshake; all later
	 * chips report FW-defined status through intr_info_lo below. */
	if (CFG_IST(ha, CFG_CTRL_2200)) {
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				mbx = RD16_IO_REG(ha, mailbox[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			/* No fast-post completion claimed above; check the
			 * mailbox semaphore and the response queue instead. */
			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox[0]);
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Publish accumulated flag changes to the task
			 * daemon under its lock, then reset accumulators. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* Post-2200 chips: poll the FW interrupt status register. */
		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & RH_RISC_INT) {
			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			if (intr_loop) {
				intr_loop--;
			}

			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    MAX_SPURIOUS_INTR) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				} else {
					intr = B_TRUE;
				}
				break;

			/* The fast-post cases below rewrite stat into the
			 * matching async event code and reuse that path. */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				CFG_IST(ha, CFG_CTRL_242581) ?
				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Publish accumulated flag changes to the task
			 * daemon under its lock, then reset accumulators. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/* Stop polling if a parity/pause error was flagged
			 * by ql_handle_uncommon_risc_intr() above. */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
				break;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	/* Kick the task daemon if any flags were changed above. */
	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Complete all commands queued by the handlers, outside the lock. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	/* Drop the power-management busy reference taken on entry. */
	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}
509 
510 /*
511  * ql_handle_uncommon_risc_intr
512  *	Handle an uncommon RISC interrupt.
513  *
514  * Input:
515  *	ha:		adapter state pointer.
516  *	stat:		interrupt status
517  *
518  * Context:
519  *	Interrupt or Kernel context, no mailbox commands allowed.
520  */
521 static void
522 ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
523     uint32_t *set_flags)
524 {
525 	uint16_t	hccr_reg;
526 
527 	hccr_reg = RD16_IO_REG(ha, hccr);
528 
529 	if (stat & RH_RISC_PAUSED ||
530 	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
531 
532 		ADAPTER_STATE_LOCK(ha);
533 		ha->flags |= PARITY_ERROR;
534 		ADAPTER_STATE_UNLOCK(ha);
535 
536 		if (ha->parity_pause_errors == 0 ||
537 		    ha->parity_hccr_err != hccr_reg ||
538 		    ha->parity_stat_err != stat) {
539 			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
540 			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
541 			    ha->instance, hccr_reg, stat,
542 			    ha->parity_pause_errors);
543 			ha->parity_hccr_err = hccr_reg;
544 			ha->parity_stat_err = stat;
545 		}
546 
547 		EL(ha, "parity/pause error, isp_abort_needed\n");
548 
549 		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
550 			ql_reset_chip(ha);
551 		}
552 
553 		if (ha->parity_pause_errors == 0) {
554 			(void) ql_flash_errlog(ha, FLASH_ERRLOG_PARITY_ERR,
555 			    0, MSW(stat), LSW(stat));
556 		}
557 
558 		if (ha->parity_pause_errors < 0xffffffff) {
559 			ha->parity_pause_errors++;
560 		}
561 
562 		*set_flags |= ISP_ABORT_NEEDED;
563 
564 		/* Disable ISP interrupts. */
565 		WRT16_IO_REG(ha, ictrl, 0);
566 		ADAPTER_STATE_LOCK(ha);
567 		ha->flags &= ~INTERRUPTS_ENABLED;
568 		ADAPTER_STATE_UNLOCK(ha);
569 	} else {
570 		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
571 		    stat, hccr_reg);
572 	}
573 }
574 
575 /*
576  * ql_spurious_intr
577  *	Inform Solaris of spurious interrupts.
578  *
579  * Input:
580  *	ha:		adapter state pointer.
581  *	intr_clr:	early interrupt clear
582  *
583  * Context:
584  *	Interrupt or Kernel context, no mailbox commands allowed.
585  */
586 static void
587 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
588 {
589 	ddi_devstate_t	state;
590 
591 	EL(ha, "Spurious interrupt\n");
592 
593 	/* Disable ISP interrupts. */
594 	WRT16_IO_REG(ha, ictrl, 0);
595 	ADAPTER_STATE_LOCK(ha);
596 	ha->flags &= ~INTERRUPTS_ENABLED;
597 	ADAPTER_STATE_UNLOCK(ha);
598 
599 	/* Clear RISC interrupt */
600 	if (intr_clr) {
601 		CFG_IST(ha, CFG_CTRL_242581) ?
602 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
603 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
604 	}
605 
606 	state = ddi_get_devstate(ha->dip);
607 	if (state == DDI_DEVSTATE_UP) {
608 		/*EMPTY*/
609 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
610 		    DDI_DEVICE_FAULT, "spurious interrupts");
611 	}
612 }
613 
614 /*
615  * ql_mbx_completion
616  *	Processes mailbox completions.
617  *
618  * Input:
619  *	ha:		adapter state pointer.
620  *	mb0:		Mailbox 0 contents.
621  *	set_flags:	task daemon flags to set.
622  *	reset_flags:	task daemon flags to reset.
623  *	intr_clr:	early interrupt clear
624  *
625  * Context:
626  *	Interrupt context.
627  */
628 /* ARGSUSED */
629 static void
630 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
631     uint32_t *reset_flags, int intr_clr)
632 {
633 	uint32_t	index;
634 	uint16_t	cnt;
635 
636 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
637 
638 	/* Load return mailbox registers. */
639 	MBX_REGISTER_LOCK(ha);
640 
641 	if (ha->mcp != NULL) {
642 		ha->mcp->mb[0] = mb0;
643 		index = ha->mcp->in_mb & ~MBX_0;
644 
645 		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
646 			index >>= 1;
647 			if (index & MBX_0) {
648 				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
649 				    mailbox[cnt]);
650 			}
651 		}
652 
653 	} else {
654 		EL(ha, "mcp == NULL\n");
655 	}
656 
657 	if (intr_clr) {
658 		/* Clear RISC interrupt. */
659 		CFG_IST(ha, CFG_CTRL_242581) ?
660 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
661 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
662 	}
663 
664 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
665 	if (ha->flags & INTERRUPTS_ENABLED) {
666 		cv_broadcast(&ha->cv_mbx_intr);
667 	}
668 
669 	MBX_REGISTER_UNLOCK(ha);
670 
671 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
672 }
673 
674 /*
675  * ql_async_event
676  *	Processes asynchronous events.
677  *
678  * Input:
679  *	ha:		adapter state pointer.
680  *	mbx:		Mailbox 0 register.
681  *	done_q:		head pointer to done queue.
682  *	set_flags:	task daemon flags to set.
683  *	reset_flags:	task daemon flags to reset.
684  *	intr_clr:	early interrupt clear
685  *
686  * Context:
687  *	Interrupt or Kernel context, no mailbox commands allowed.
688  */
689 static void
690 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
691     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
692 {
693 	uint32_t		handle;
694 	uint32_t		index;
695 	uint16_t		cnt;
696 	uint16_t		mb[MAX_MBOX_COUNT];
697 	ql_srb_t		*sp;
698 	port_id_t		s_id;
699 	ql_tgt_t		*tq;
700 	boolean_t		intr = B_TRUE;
701 	ql_adapter_state_t	*vha;
702 
703 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
704 
705 	/* Setup to process fast completion. */
706 	mb[0] = LSW(mbx);
707 	switch (mb[0]) {
708 	case MBA_SCSI_COMPLETION:
709 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
710 		    RD16_IO_REG(ha, mailbox[2]));
711 		break;
712 
713 	case MBA_CMPLT_1_16BIT:
714 		handle = MSW(mbx);
715 		mb[0] = MBA_SCSI_COMPLETION;
716 		break;
717 
718 	case MBA_CMPLT_1_32BIT:
719 		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
720 		mb[0] = MBA_SCSI_COMPLETION;
721 		break;
722 
723 	case MBA_CTIO_COMPLETION:
724 	case MBA_IP_COMPLETION:
725 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
726 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
727 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
728 		mb[0] = MBA_SCSI_COMPLETION;
729 		break;
730 
731 	default:
732 		break;
733 	}
734 
735 	/* Handle asynchronous event */
736 	switch (mb[0]) {
737 	case MBA_SCSI_COMPLETION:
738 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
739 		    ha->instance);
740 
741 		if (intr_clr) {
742 			/* Clear RISC interrupt */
743 			CFG_IST(ha, CFG_CTRL_242581) ?
744 			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
745 			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
746 			intr = B_FALSE;
747 		}
748 
749 		if ((ha->flags & ONLINE) == 0) {
750 			break;
751 		}
752 
753 		/* Get handle. */
754 		index = handle & OSC_INDEX_MASK;
755 
756 		/* Validate handle. */
757 		sp = index < MAX_OUTSTANDING_COMMANDS ?
758 		    ha->outstanding_cmds[index] : NULL;
759 
760 		if (sp != NULL && sp->handle == handle) {
761 			ha->outstanding_cmds[index] = NULL;
762 			sp->handle = 0;
763 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
764 
765 			/* Set completed status. */
766 			sp->flags |= SRB_ISP_COMPLETED;
767 
768 			/* Set completion status */
769 			sp->pkt->pkt_reason = CS_COMPLETE;
770 
771 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
772 				/* Place block on done queue */
773 				ql_add_link_b(done_q, &sp->cmd);
774 			} else {
775 				ql_fast_fcp_post(sp);
776 			}
777 		} else if (handle != QL_FCA_BRAND) {
778 			if (sp == NULL) {
779 				EL(ha, "%xh unknown IOCB handle=%xh\n",
780 				    mb[0], handle);
781 			} else {
782 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
783 				    "sp=%xh\n", mb[0], handle, sp->handle);
784 			}
785 
786 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
787 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
788 			    RD16_IO_REG(ha, mailbox[1]),
789 			    RD16_IO_REG(ha, mailbox[2]),
790 			    RD16_IO_REG(ha, mailbox[3]),
791 			    RD16_IO_REG(ha, mailbox[6]),
792 			    RD16_IO_REG(ha, mailbox[7]));
793 
794 			(void) ql_binary_fw_dump(ha, FALSE);
795 
796 			if (!(ha->task_daemon_flags &
797 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
798 				EL(ha, "%xh ISP Invalid handle, "
799 				    "isp_abort_needed\n", mb[0]);
800 				*set_flags |= ISP_ABORT_NEEDED;
801 			}
802 		}
803 		break;
804 
805 	case MBA_RESET:		/* Reset */
806 		EL(ha, "%xh Reset received\n", mb[0]);
807 		*set_flags |= RESET_MARKER_NEEDED;
808 		break;
809 
810 	case MBA_SYSTEM_ERR:		/* System Error */
811 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
812 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
813 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
814 		mb[7] = RD16_IO_REG(ha, mailbox[7]);
815 
816 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
817 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
818 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
819 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
820 		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
821 		    RD16_IO_REG(ha, mailbox[6]), mb[7],
822 		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
823 		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
824 		    RD16_IO_REG(ha, mailbox[12]));
825 
826 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
827 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
828 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
829 		    mb[0], RD16_IO_REG(ha, mailbox[13]),
830 		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
831 		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
832 		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
833 		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
834 		    RD16_IO_REG(ha, mailbox[22]),
835 		    RD16_IO_REG(ha, mailbox[23]));
836 
837 		if (ha->reg_off->mbox_cnt > 24) {
838 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
839 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
840 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
841 			    RD16_IO_REG(ha, mailbox[24]),
842 			    RD16_IO_REG(ha, mailbox[25]),
843 			    RD16_IO_REG(ha, mailbox[26]),
844 			    RD16_IO_REG(ha, mailbox[27]),
845 			    RD16_IO_REG(ha, mailbox[28]),
846 			    RD16_IO_REG(ha, mailbox[29]),
847 			    RD16_IO_REG(ha, mailbox[30]),
848 			    RD16_IO_REG(ha, mailbox[31]));
849 		}
850 
851 		(void) ql_binary_fw_dump(ha, FALSE);
852 
853 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
854 		    mb[2], mb[3]);
855 
856 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
857 			ADAPTER_STATE_LOCK(ha);
858 			ha->flags |= MPI_RESET_NEEDED;
859 			ADAPTER_STATE_UNLOCK(ha);
860 		}
861 
862 		*set_flags |= ISP_ABORT_NEEDED;
863 		ha->xioctl->ControllerErrorCount++;
864 		break;
865 
866 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
867 		EL(ha, "%xh Request Transfer Error received, "
868 		    "isp_abort_needed\n", mb[0]);
869 
870 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
871 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
872 		    RD16_IO_REG(ha, mailbox[3]));
873 
874 		*set_flags |= ISP_ABORT_NEEDED;
875 		ha->xioctl->ControllerErrorCount++;
876 		break;
877 
878 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
879 		EL(ha, "%xh Response Transfer Error received,"
880 		    " isp_abort_needed\n", mb[0]);
881 
882 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
883 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
884 		    RD16_IO_REG(ha, mailbox[3]));
885 
886 		*set_flags |= ISP_ABORT_NEEDED;
887 		ha->xioctl->ControllerErrorCount++;
888 		break;
889 
890 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
891 		EL(ha, "%xh Request Queue Wake-up received\n",
892 		    mb[0]);
893 		break;
894 
895 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
896 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
897 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
898 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
899 
900 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
901 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
902 
903 		switch (mb[1]) {
904 		case MLA_LOGIN_OPERATIONAL_FW:
905 			ADAPTER_STATE_LOCK(ha);
906 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
907 			ADAPTER_STATE_UNLOCK(ha);
908 			break;
909 		case MLA_PANIC_RECOVERY:
910 		case MLA_LOGIN_DIAGNOSTIC_FW:
911 		case MLA_LOGIN_GOLDEN_FW:
912 		case MLA_REJECT_RESPONSE:
913 		default:
914 			break;
915 		}
916 		break;
917 
918 	case MBA_LIP_F8:	/* Received a LIP F8. */
919 	case MBA_LIP_RESET:	/* LIP reset occurred. */
920 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
921 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
922 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
923 			    "\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
924 			    RD16_IO_REG(ha, mailbox[2]));
925 		} else {
926 			EL(ha, "%xh LIP received\n", mb[0]);
927 		}
928 
929 		ADAPTER_STATE_LOCK(ha);
930 		ha->flags &= ~POINT_TO_POINT;
931 		ADAPTER_STATE_UNLOCK(ha);
932 
933 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
934 			*set_flags |= LOOP_DOWN;
935 		}
936 		ql_port_state(ha, FC_STATE_OFFLINE,
937 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
938 
939 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
940 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
941 		}
942 
943 		ha->adapter_stats->lip_count++;
944 
945 		/* Update AEN queue. */
946 		ha->xioctl->TotalLipResets++;
947 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
948 			ql_enqueue_aen(ha, mb[0], NULL);
949 		}
950 		break;
951 
952 	case MBA_LOOP_UP:
953 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
954 		    CFG_CTRL_242581))) {
955 			mb[1] = RD16_IO_REG(ha, mailbox[1]);
956 			if (mb[1] == IIDMA_RATE_1GB) {		/* 1GB */
957 				ha->state = FC_PORT_STATE_MASK(
958 				    ha->state) | FC_STATE_1GBIT_SPEED;
959 				index = 1;
960 			} else if (mb[1] == IIDMA_RATE_2GB) {	/* 2GB */
961 				ha->state = FC_PORT_STATE_MASK(
962 				    ha->state) | FC_STATE_2GBIT_SPEED;
963 				index = 2;
964 			} else if (mb[1] == IIDMA_RATE_4GB) {	/* 4GB */
965 				ha->state = FC_PORT_STATE_MASK(
966 				    ha->state) | FC_STATE_4GBIT_SPEED;
967 				index = 4;
968 			} else if (mb[1] == IIDMA_RATE_8GB) {	/* 8GB */
969 				ha->state = FC_PORT_STATE_MASK(
970 				    ha->state) | FC_STATE_8GBIT_SPEED;
971 				index = 8;
972 			} else if (mb[1] == IIDMA_RATE_10GB) {	/* 10GB */
973 				ha->state = FC_PORT_STATE_MASK(
974 				    ha->state) | FC_STATE_10GBIT_SPEED;
975 				index = 10;
976 			} else {
977 				ha->state = FC_PORT_STATE_MASK(
978 				    ha->state);
979 				index = 0;
980 			}
981 		} else {
982 			ha->state = FC_PORT_STATE_MASK(ha->state) |
983 			    FC_STATE_FULL_SPEED;
984 			index = 1;
985 		}
986 
987 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
988 			vha->state = FC_PORT_STATE_MASK(vha->state) |
989 			    FC_PORT_SPEED_MASK(ha->state);
990 		}
991 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
992 
993 		/* Update AEN queue. */
994 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
995 			ql_enqueue_aen(ha, mb[0], NULL);
996 		}
997 		break;
998 
999 	case MBA_LOOP_DOWN:
1000 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1001 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1002 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]),
1003 		    RD16_IO_REG(ha, mailbox[4]));
1004 
1005 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1006 			*set_flags |= LOOP_DOWN;
1007 		}
1008 		ql_port_state(ha, FC_STATE_OFFLINE,
1009 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1010 
1011 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1012 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1013 		}
1014 
1015 		if (CFG_IST(ha, CFG_CTRL_2581)) {
1016 			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
1017 		}
1018 
1019 		/* Update AEN queue. */
1020 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1021 			ql_enqueue_aen(ha, mb[0], NULL);
1022 		}
1023 		break;
1024 
1025 	case MBA_PORT_UPDATE:
1026 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1027 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1028 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1029 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1030 
1031 		/* Locate port state structure. */
1032 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1033 			if (vha->vp_index == LSB(mb[3])) {
1034 				break;
1035 			}
1036 		}
1037 		if (vha == NULL) {
1038 			break;
1039 		}
1040 		/*
1041 		 * In N port 2 N port topology the FW provides a port
1042 		 * database entry at loop_id 0x7fe which we use to
1043 		 * acquire the Ports WWPN.
1044 		 */
1045 		if ((mb[1] != 0x7fe) &&
1046 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1047 		    (CFG_IST(ha, CFG_CTRL_242581) &&
1048 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1049 			EL(ha, "%xh Port Database Update, Login/Logout "
1050 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1051 			    mb[0], mb[1], mb[2], mb[3]);
1052 		} else {
1053 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1054 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1055 			    mb[3]);
1056 			*set_flags |= LOOP_RESYNC_NEEDED;
1057 			*set_flags &= ~LOOP_DOWN;
1058 			*reset_flags |= LOOP_DOWN;
1059 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1060 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1061 			TASK_DAEMON_LOCK(ha);
1062 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1063 			vha->task_daemon_flags &= ~LOOP_DOWN;
1064 			TASK_DAEMON_UNLOCK(ha);
1065 			ADAPTER_STATE_LOCK(ha);
1066 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1067 			ADAPTER_STATE_UNLOCK(ha);
1068 		}
1069 
1070 		/* Update AEN queue. */
1071 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1072 			ql_enqueue_aen(ha, mb[0], NULL);
1073 		}
1074 		break;
1075 
1076 	case MBA_RSCN_UPDATE:
1077 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1078 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1079 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1080 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1081 
1082 		/* Locate port state structure. */
1083 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1084 			if (vha->vp_index == LSB(mb[3])) {
1085 				break;
1086 			}
1087 		}
1088 
1089 		if (vha == NULL) {
1090 			break;
1091 		}
1092 
1093 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1094 		    MSB(mb[2]) == vha->d_id.b.area &&
1095 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1096 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1097 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1098 		} else {
1099 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1100 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1101 			if (FC_PORT_STATE_MASK(vha->state) !=
1102 			    FC_STATE_OFFLINE) {
1103 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1104 				TASK_DAEMON_LOCK(ha);
1105 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1106 				TASK_DAEMON_UNLOCK(ha);
1107 				*set_flags |= RSCN_UPDATE_NEEDED;
1108 			}
1109 		}
1110 
1111 		/* Update AEN queue. */
1112 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1113 			ql_enqueue_aen(ha, mb[0], NULL);
1114 		}
1115 		break;
1116 
1117 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1118 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1119 		    RD16_IO_REG(ha, mailbox[1]));
1120 		break;
1121 
1122 	case MBA_IP_RECEIVE:
1123 	case MBA_IP_BROADCAST:
1124 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1125 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1126 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
1127 
1128 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1129 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1130 
1131 		/* Locate device queue. */
1132 		s_id.b.al_pa = LSB(mb[2]);
1133 		s_id.b.area = MSB(mb[2]);
1134 		s_id.b.domain = LSB(mb[1]);
1135 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1136 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1137 			break;
1138 		}
1139 
1140 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1141 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1142 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1143 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1144 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1145 
1146 		tq->ub_sequence_length = mb[3];
1147 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1148 		if (mb[3] % cnt) {
1149 			tq->ub_total_seg_cnt++;
1150 		}
1151 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1152 
1153 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1154 		    index++) {
1155 			mb[index] = RD16_IO_REG(ha, mailbox[index]);
1156 		}
1157 
1158 		tq->ub_seq_id = ++ha->ub_seq_id;
1159 		tq->ub_seq_cnt = 0;
1160 		tq->ub_frame_ro = 0;
1161 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1162 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
1163 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1164 		ha->rcv_dev_q = tq;
1165 
1166 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1167 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1168 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1169 			    QL_SUCCESS) {
1170 				EL(ha, "ql_ub_frame_hdr failed, "
1171 				    "isp_abort_needed\n");
1172 				*set_flags |= ISP_ABORT_NEEDED;
1173 				break;
1174 			}
1175 		}
1176 		break;
1177 
1178 	case MBA_IP_LOW_WATER_MARK:
1179 	case MBA_IP_RCV_BUFFER_EMPTY:
1180 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1181 		    mb[0]);
1182 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1183 		break;
1184 
1185 	case MBA_IP_HDR_DATA_SPLIT:
1186 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1187 		break;
1188 
1189 	case MBA_ERROR_LOGGING_DISABLED:
1190 		EL(ha, "%xh error logging disabled received, "
1191 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]));
1192 		break;
1193 
1194 	case MBA_POINT_TO_POINT:
1195 	/* case MBA_DCBX_COMPLETED: */
1196 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1197 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1198 		} else {
1199 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1200 		}
1201 		ADAPTER_STATE_LOCK(ha);
1202 		ha->flags |= POINT_TO_POINT;
1203 		ADAPTER_STATE_UNLOCK(ha);
1204 		break;
1205 
1206 	case MBA_FCF_CONFIG_ERROR:
1207 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1208 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1209 		break;
1210 
1211 	case MBA_DCBX_PARAM_CHANGED:
1212 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1213 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1214 		break;
1215 
1216 	case MBA_CHG_IN_CONNECTION:
1217 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1218 		if (mb[1] == 2) {
1219 			EL(ha, "%xh Change In Connection received, "
1220 			    "mbx1=%xh\n",  mb[0], mb[1]);
1221 			ADAPTER_STATE_LOCK(ha);
1222 			ha->flags &= ~POINT_TO_POINT;
1223 			ADAPTER_STATE_UNLOCK(ha);
1224 			if (ha->topology & QL_N_PORT) {
1225 				ha->topology = (uint8_t)(ha->topology &
1226 				    ~QL_N_PORT);
1227 				ha->topology = (uint8_t)(ha->topology |
1228 				    QL_NL_PORT);
1229 			}
1230 		} else {
1231 			EL(ha, "%xh Change In Connection received, "
1232 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1233 			*set_flags |= ISP_ABORT_NEEDED;
1234 		}
1235 		break;
1236 
1237 	case MBA_ZIO_UPDATE:
1238 		EL(ha, "%xh ZIO response received\n", mb[0]);
1239 
1240 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1241 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1242 		intr = B_FALSE;
1243 		break;
1244 
1245 	case MBA_PORT_BYPASS_CHANGED:
1246 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1247 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1248 		/*
1249 		 * Event generated when there is a transition on
1250 		 * port bypass of crystal+.
1251 		 * Mailbox 1:	Bit 0 - External.
1252 		 *		Bit 2 - Internal.
1253 		 * When the bit is 0, the port is bypassed.
1254 		 *
1255 		 * For now we will generate a LIP for all cases.
1256 		 */
1257 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1258 		break;
1259 
1260 	case MBA_RECEIVE_ERROR:
1261 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1262 		    mb[0], RD16_IO_REG(ha, mailbox[1]),
1263 		    RD16_IO_REG(ha, mailbox[2]));
1264 		break;
1265 
1266 	case MBA_LS_RJT_SENT:
1267 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1268 		    RD16_IO_REG(ha, mailbox[1]));
1269 		break;
1270 
1271 	case MBA_FW_RESTART_COMP:
1272 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1273 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1274 		break;
1275 
1276 	case MBA_IDC_COMPLETE:
1277 		EL(ha, "%xh Inter-driver communication complete received, "
1278 		    "mbx1=%xh, mbx2=%xh\n", mb[0],
1279 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1280 		break;
1281 
1282 	case MBA_IDC_NOTIFICATION:
1283 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox[1]);
1284 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox[2]);
1285 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox[3]);
1286 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox[4]);
1287 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox[5]);
1288 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox[6]);
1289 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox[7]);
1290 		EL(ha, "%xh Inter-driver communication request notification "
1291 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1292 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1293 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1294 		    ha->idc_mb[6], ha->idc_mb[7]);
1295 		*set_flags |= IDC_ACK_NEEDED;
1296 		break;
1297 
1298 	case MBA_IDC_TIME_EXTENDED:
1299 		EL(ha, "%xh Inter-driver communication time extended received,"
1300 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1301 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1302 		break;
1303 
1304 	default:
1305 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1306 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1307 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
1308 		break;
1309 	}
1310 
1311 	/* Clear RISC interrupt */
1312 	if (intr && intr_clr) {
1313 		CFG_IST(ha, CFG_CTRL_242581) ?
1314 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1315 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1316 	}
1317 
1318 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1319 }
1320 
1321 /*
1322  * ql_fast_fcp_post
1323  *	Fast path for good SCSI I/O completion.
1324  *
1325  * Input:
1326  *	sp:	SRB pointer.
1327  *
1328  * Context:
1329  *	Interrupt or Kernel context, no mailbox commands allowed.
1330  */
1331 static void
1332 ql_fast_fcp_post(ql_srb_t *sp)
1333 {
1334 	ql_adapter_state_t	*ha = sp->ha;
1335 	ql_lun_t		*lq = sp->lun_queue;
1336 	ql_tgt_t		*tq = lq->target_queue;
1337 
1338 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1339 
1340 	ASSERT(sp->flags & SRB_FCP_CMD_PKT && ha &&
1341 	    sp->pkt->pkt_reason == CS_COMPLETE);
1342 
1343 	/* Acquire device queue lock. */
1344 	DEVICE_QUEUE_LOCK(tq);
1345 
1346 	/* Decrement outstanding commands on device. */
1347 	if (tq->outcnt != 0) {
1348 		tq->outcnt--;
1349 	}
1350 
1351 	if (sp->flags & SRB_FCP_CMD_PKT) {
1352 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1353 			/*
1354 			 * Clear the flag for this LUN so that
1355 			 * untagged commands can be submitted
1356 			 * for it.
1357 			 */
1358 			lq->flags &= ~LQF_UNTAGGED_PENDING;
1359 		}
1360 
1361 		if (lq->lun_outcnt != 0) {
1362 			lq->lun_outcnt--;
1363 		}
1364 	}
1365 
1366 	/* Reset port down retry count on good completion. */
1367 	tq->port_down_retry_count = ha->port_down_retry_count;
1368 	tq->qfull_retry_count = ha->qfull_retry_count;
1369 
1370 	/* Remove command from watchdog queue. */
1371 	if (sp->flags & SRB_WATCHDOG_ENABLED) {
1372 		ql_remove_link(&tq->wdg, &sp->wdg);
1373 		sp->flags &= ~SRB_WATCHDOG_ENABLED;
1374 	}
1375 
1376 	if (lq->cmd.first != NULL) {
1377 		ql_next(ha, lq);
1378 	} else {
1379 		/* Release LU queue specific lock. */
1380 		DEVICE_QUEUE_UNLOCK(tq);
1381 		if (ha->pha->pending_cmds.first != NULL) {
1382 			ql_start_iocb(ha, NULL);
1383 		}
1384 	}
1385 
1386 	/* Sync buffers if required.  */
1387 	if (sp->flags & SRB_MS_PKT) {
1388 		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1389 		    DDI_DMA_SYNC_FORCPU);
1390 	}
1391 
1392 	/* Map ISP completion codes. */
1393 	sp->pkt->pkt_expln = FC_EXPLN_NONE;
1394 	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1395 	sp->pkt->pkt_state = FC_PKT_SUCCESS;
1396 
1397 	/* Now call the pkt completion callback */
1398 	if (sp->flags & SRB_POLL) {
1399 		sp->flags &= ~SRB_POLL;
1400 	} else if (sp->pkt->pkt_comp) {
1401 		INTR_UNLOCK(ha);
1402 		(*sp->pkt->pkt_comp)(sp->pkt);
1403 		INTR_LOCK(ha);
1404 	}
1405 
1406 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1407 }
1408 
1409 /*
1410  * ql_response_pkt
1411  *	Processes response entry.
1412  *
1413  * Input:
1414  *	ha:		adapter state pointer.
1415  *	done_q:		head pointer to done queue.
1416  *	set_flags:	task daemon flags to set.
1417  *	reset_flags:	task daemon flags to reset.
1418  *	intr_clr:	early interrupt clear
1419  *
1420  * Context:
1421  *	Interrupt or Kernel context, no mailbox commands allowed.
1422  */
1423 static void
1424 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1425     uint32_t *reset_flags, int intr_clr)
1426 {
1427 	response_t	*pkt;
1428 	uint32_t	dma_sync_size_1 = 0;
1429 	uint32_t	dma_sync_size_2 = 0;
1430 	int		status = 0;
1431 
1432 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1433 
1434 	/* Clear RISC interrupt */
1435 	if (intr_clr) {
1436 		CFG_IST(ha, CFG_CTRL_242581) ?
1437 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1438 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1439 	}
1440 
1441 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1442 		EL(ha, "index error = %xh, isp_abort_needed",
1443 		    ha->isp_rsp_index);
1444 		*set_flags |= ISP_ABORT_NEEDED;
1445 		return;
1446 	}
1447 
1448 	if ((ha->flags & ONLINE) == 0) {
1449 		QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1450 		return;
1451 	}
1452 
1453 	/* Calculate size of response queue entries to sync. */
1454 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1455 		dma_sync_size_1 = (uint32_t)
1456 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1457 		    RESPONSE_ENTRY_SIZE);
1458 	} else if (ha->isp_rsp_index == 0) {
1459 		dma_sync_size_1 = (uint32_t)
1460 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1461 		    RESPONSE_ENTRY_SIZE);
1462 	} else {
1463 		/* Responses wrap around the Q */
1464 		dma_sync_size_1 = (uint32_t)
1465 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1466 		    RESPONSE_ENTRY_SIZE);
1467 		dma_sync_size_2 = (uint32_t)
1468 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1469 	}
1470 
1471 	/* Sync DMA buffer. */
1472 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1473 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1474 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1475 	    DDI_DMA_SYNC_FORKERNEL);
1476 	if (dma_sync_size_2) {
1477 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1478 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1479 		    DDI_DMA_SYNC_FORKERNEL);
1480 	}
1481 
1482 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1483 		pkt = ha->response_ring_ptr;
1484 
1485 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1486 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1487 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1488 		    RESPONSE_ENTRY_SIZE);
1489 
1490 		/* Adjust ring index. */
1491 		ha->rsp_ring_index++;
1492 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1493 			ha->rsp_ring_index = 0;
1494 			ha->response_ring_ptr = ha->response_ring_bp;
1495 		} else {
1496 			ha->response_ring_ptr++;
1497 		}
1498 
1499 		/* Process packet. */
1500 		if (ha->status_srb != NULL && pkt->entry_type !=
1501 		    STATUS_CONT_TYPE) {
1502 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1503 			ha->status_srb = NULL;
1504 		}
1505 
1506 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1507 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1508 
1509 		if (pkt->entry_status != 0) {
1510 			ql_error_entry(ha, pkt, done_q, set_flags,
1511 			    reset_flags);
1512 		} else {
1513 			switch (pkt->entry_type) {
1514 			case STATUS_TYPE:
1515 				status |= CFG_IST(ha, CFG_CTRL_242581) ?
1516 				    ql_24xx_status_entry(ha,
1517 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1518 				    reset_flags) :
1519 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1520 				    done_q, set_flags, reset_flags);
1521 				break;
1522 			case STATUS_CONT_TYPE:
1523 				ql_status_cont_entry(ha,
1524 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1525 				    reset_flags);
1526 				break;
1527 			case IP_TYPE:
1528 			case IP_A64_TYPE:
1529 			case IP_CMD_TYPE:
1530 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1531 				    set_flags, reset_flags);
1532 				break;
1533 			case IP_RECEIVE_TYPE:
1534 				ql_ip_rcv_entry(ha,
1535 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1536 				    reset_flags);
1537 				break;
1538 			case IP_RECEIVE_CONT_TYPE:
1539 				ql_ip_rcv_cont_entry(ha,
1540 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1541 				    set_flags, reset_flags);
1542 				break;
1543 			case IP_24XX_RECEIVE_TYPE:
1544 				ql_ip_24xx_rcv_entry(ha,
1545 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1546 				    set_flags, reset_flags);
1547 				break;
1548 			case MS_TYPE:
1549 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1550 				    set_flags, reset_flags);
1551 				break;
1552 			case REPORT_ID_TYPE:
1553 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1554 				    done_q, set_flags, reset_flags);
1555 				break;
1556 			case ELS_PASSTHRU_TYPE:
1557 				ql_els_passthru_entry(ha,
1558 				    (els_passthru_entry_rsp_t *)pkt,
1559 				    done_q, set_flags, reset_flags);
1560 				break;
1561 			case IP_BUF_POOL_TYPE:
1562 			case MARKER_TYPE:
1563 			case VP_MODIFY_TYPE:
1564 			case VP_CONTROL_TYPE:
1565 				break;
1566 			default:
1567 				EL(ha, "Unknown IOCB entry type=%xh\n",
1568 				    pkt->entry_type);
1569 				break;
1570 			}
1571 		}
1572 	}
1573 
1574 	/* Inform RISC of processed responses. */
1575 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1576 
1577 	/* RESET packet received delay for possible async event. */
1578 	if (status & BIT_0) {
1579 		drv_usecwait(500000);
1580 	}
1581 
1582 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1583 }
1584 
1585 /*
1586  * ql_error_entry
1587  *	Processes error entry.
1588  *
1589  * Input:
1590  *	ha = adapter state pointer.
1591  *	pkt = entry pointer.
1592  *	done_q = head pointer to done queue.
1593  *	set_flags = task daemon flags to set.
1594  *	reset_flags = task daemon flags to reset.
1595  *
1596  * Context:
1597  *	Interrupt or Kernel context, no mailbox commands allowed.
1598  */
1599 /* ARGSUSED */
1600 static void
1601 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1602     uint32_t *set_flags, uint32_t *reset_flags)
1603 {
1604 	ql_srb_t	*sp;
1605 	uint32_t	index, cnt;
1606 
1607 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1608 		EL(ha, "Aborted command\n");
1609 		return;
1610 	}
1611 
1612 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1613 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1614 
1615 	if (pkt->entry_status & BIT_6) {
1616 		EL(ha, "Request Queue DMA error\n");
1617 	} else if (pkt->entry_status & BIT_5) {
1618 		EL(ha, "Invalid Entry Order\n");
1619 	} else if (pkt->entry_status & BIT_4) {
1620 		EL(ha, "Invalid Entry Count\n");
1621 	} else if (pkt->entry_status & BIT_3) {
1622 		EL(ha, "Invalid Entry Parameter\n");
1623 	} else if (pkt->entry_status & BIT_2) {
1624 		EL(ha, "Invalid Entry Type\n");
1625 	} else if (pkt->entry_status & BIT_1) {
1626 		EL(ha, "Busy\n");
1627 	} else {
1628 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1629 	}
1630 
1631 	/* Get handle. */
1632 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1633 	index = cnt & OSC_INDEX_MASK;
1634 
1635 	/* Validate handle. */
1636 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1637 	    NULL;
1638 
1639 	if (sp != NULL && sp->handle == cnt) {
1640 		ha->outstanding_cmds[index] = NULL;
1641 		sp->handle = 0;
1642 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1643 
1644 		/* Bad payload or header */
1645 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1646 			/* Bad payload or header, set error status. */
1647 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1648 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1649 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1650 		} else {
1651 			/* Set error status. */
1652 			sp->pkt->pkt_reason = CS_UNKNOWN;
1653 		}
1654 
1655 		/* Set completed status. */
1656 		sp->flags |= SRB_ISP_COMPLETED;
1657 
1658 		/* Place command on done queue. */
1659 		ql_add_link_b(done_q, &sp->cmd);
1660 
1661 	} else {
1662 		if (sp == NULL) {
1663 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1664 		} else {
1665 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1666 			    cnt, sp->handle);
1667 		}
1668 
1669 		(void) ql_binary_fw_dump(ha, FALSE);
1670 
1671 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1672 		    ABORT_ISP_ACTIVE))) {
1673 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1674 			*set_flags |= ISP_ABORT_NEEDED;
1675 		}
1676 	}
1677 
1678 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1679 }
1680 
1681 /*
1682  * ql_status_entry
1683  *	Processes received ISP2200-2300 status entry.
1684  *
1685  * Input:
1686  *	ha:		adapter state pointer.
1687  *	pkt:		entry pointer.
1688  *	done_q:		done queue pointer.
1689  *	set_flags:	task daemon flags to set.
1690  *	reset_flags:	task daemon flags to reset.
1691  *
1692  * Returns:
1693  *	BIT_0 = CS_RESET status received.
1694  *
1695  * Context:
1696  *	Interrupt or Kernel context, no mailbox commands allowed.
1697  */
1698 /* ARGSUSED */
1699 static int
1700 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1701     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1702 {
1703 	ql_srb_t		*sp;
1704 	uint32_t		index, cnt;
1705 	uint16_t		comp_status;
1706 	int			rval = 0;
1707 
1708 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1709 
1710 	/* Get handle. */
1711 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1712 	index = cnt & OSC_INDEX_MASK;
1713 
1714 	/* Validate handle. */
1715 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1716 	    NULL;
1717 
1718 	if (sp != NULL && sp->handle == cnt) {
1719 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1720 		    &pkt->comp_status);
1721 
1722 		/*
1723 		 * We dont care about SCSI QFULLs.
1724 		 */
1725 		if (comp_status == CS_QUEUE_FULL) {
1726 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1727 			    sp->lun_queue->target_queue->d_id.b24,
1728 			    sp->lun_queue->lun_no);
1729 			comp_status = CS_COMPLETE;
1730 		}
1731 
1732 		/*
1733 		 * 2300 firmware marks completion status as data underrun
1734 		 * for scsi qfulls. Make it transport complete.
1735 		 */
1736 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1737 		    (comp_status == CS_DATA_UNDERRUN) &&
1738 		    (pkt->scsi_status_l != 0)) {
1739 			comp_status = CS_COMPLETE;
1740 		}
1741 
1742 		/*
1743 		 * Workaround T3 issue where we do not get any data xferred
1744 		 * but get back a good status.
1745 		 */
1746 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1747 		    comp_status == CS_COMPLETE &&
1748 		    pkt->scsi_status_l == 0 &&
1749 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1750 		    pkt->residual_length == 0 &&
1751 		    sp->fcp &&
1752 		    sp->fcp->fcp_data_len != 0 &&
1753 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1754 		    SF_DATA_OUT) {
1755 			comp_status = CS_ABORTED;
1756 		}
1757 
1758 		if (sp->flags & SRB_MS_PKT) {
1759 			/*
1760 			 * Ideally it should never be true. But there
1761 			 * is a bug in FW which upon receiving invalid
1762 			 * parameters in MS IOCB returns it as
1763 			 * status entry and not as ms entry type.
1764 			 */
1765 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1766 			    set_flags, reset_flags);
1767 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1768 			    ha->instance);
1769 			return (0);
1770 		}
1771 
1772 		ha->outstanding_cmds[index] = NULL;
1773 		sp->handle = 0;
1774 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1775 
1776 		/*
1777 		 * Fast path to good SCSI I/O completion
1778 		 */
1779 		if ((comp_status == CS_COMPLETE) &
1780 		    (!pkt->scsi_status_l) &
1781 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1782 			/* Set completed status. */
1783 			sp->flags |= SRB_ISP_COMPLETED;
1784 			sp->pkt->pkt_reason = comp_status;
1785 			ql_fast_fcp_post(sp);
1786 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1787 			    ha->instance);
1788 			return (0);
1789 		}
1790 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1791 		    reset_flags);
1792 	} else {
1793 		if (sp == NULL) {
1794 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1795 		} else {
1796 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1797 			    cnt, sp->handle);
1798 		}
1799 
1800 		(void) ql_binary_fw_dump(ha, FALSE);
1801 
1802 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1803 		    ABORT_ISP_ACTIVE))) {
1804 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1805 			*set_flags |= ISP_ABORT_NEEDED;
1806 		}
1807 	}
1808 
1809 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1810 
1811 	return (rval);
1812 }
1813 
1814 /*
1815  * ql_24xx_status_entry
1816  *	Processes received ISP24xx status entry.
1817  *
1818  * Input:
1819  *	ha:		adapter state pointer.
1820  *	pkt:		entry pointer.
1821  *	done_q:		done queue pointer.
1822  *	set_flags:	task daemon flags to set.
1823  *	reset_flags:	task daemon flags to reset.
1824  *
1825  * Returns:
1826  *	BIT_0 = CS_RESET status received.
1827  *
1828  * Context:
1829  *	Interrupt or Kernel context, no mailbox commands allowed.
1830  */
1831 /* ARGSUSED */
1832 static int
1833 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1834     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1835 {
1836 	ql_srb_t		*sp;
1837 	uint32_t		index;
1838 	uint32_t		resp_identifier;
1839 	uint16_t		comp_status;
1840 	int			rval = 0;
1841 
1842 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1843 
1844 	/* Get the response identifier. */
1845 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1846 
1847 	/* extract the outstanding cmds index */
1848 	index = resp_identifier & OSC_INDEX_MASK;
1849 
1850 	/* Validate the index and get the associated srb pointer */
1851 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1852 	    NULL;
1853 
1854 	if (sp != NULL && sp->handle == resp_identifier) {
1855 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1856 		    &pkt->comp_status);
1857 
1858 		/*
1859 		 * We dont care about SCSI QFULLs.
1860 		 */
1861 		if (comp_status == CS_QUEUE_FULL) {
1862 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1863 			    sp->lun_queue->target_queue->d_id.b24,
1864 			    sp->lun_queue->lun_no);
1865 			comp_status = CS_COMPLETE;
1866 		}
1867 
1868 		/*
1869 		 * 2300 firmware marks completion status as data underrun
1870 		 * for scsi qfulls. Make it transport complete.
1871 		 */
1872 		if ((comp_status == CS_DATA_UNDERRUN) &&
1873 		    (pkt->scsi_status_l != 0)) {
1874 			comp_status = CS_COMPLETE;
1875 		}
1876 
1877 		/*
1878 		 * Workaround T3 issue where we do not get any data xferred
1879 		 * but get back a good status.
1880 		 */
1881 		if (comp_status == CS_COMPLETE &&
1882 		    pkt->scsi_status_l == 0 &&
1883 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1884 		    pkt->residual_length != 0 &&
1885 		    sp->fcp &&
1886 		    sp->fcp->fcp_data_len != 0 &&
1887 		    sp->fcp->fcp_cntl.cntl_write_data) {
1888 			comp_status = CS_ABORTED;
1889 		}
1890 
1891 		if (sp->flags & SRB_MS_PKT) {
1892 			/*
1893 			 * Ideally it should never be true. But there
1894 			 * is a bug in FW which upon receiving invalid
1895 			 * parameters in MS IOCB returns it as
1896 			 * status entry and not as ms entry type.
1897 			 */
1898 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1899 			    set_flags, reset_flags);
1900 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1901 			    ha->instance);
1902 			return (0);
1903 		}
1904 
1905 		ha->outstanding_cmds[index] = NULL;
1906 		sp->handle = 0;
1907 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1908 
1909 		/*
1910 		 * Fast path to good SCSI I/O completion
1911 		 */
1912 		if ((comp_status == CS_COMPLETE) &
1913 		    (!pkt->scsi_status_l) &
1914 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1915 			/* Set completed status. */
1916 			sp->flags |= SRB_ISP_COMPLETED;
1917 			sp->pkt->pkt_reason = comp_status;
1918 			ql_fast_fcp_post(sp);
1919 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1920 			    ha->instance);
1921 			return (0);
1922 		}
1923 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1924 		    set_flags, reset_flags);
1925 	} else {
1926 		if (sp == NULL) {
1927 			EL(ha, "unknown IOCB handle=%xh\n", resp_identifier);
1928 		} else {
1929 			EL(sp->ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1930 			    resp_identifier, sp->handle);
1931 		}
1932 
1933 		(void) ql_binary_fw_dump(ha, FALSE);
1934 
1935 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1936 		    ABORT_ISP_ACTIVE))) {
1937 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1938 			*set_flags |= ISP_ABORT_NEEDED;
1939 		}
1940 	}
1941 
1942 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1943 
1944 	return (rval);
1945 }
1946 
1947 /*
1948  * ql_status_error
1949  *	Processes received ISP status entry error.
1950  *
1951  * Input:
1952  *	ha:		adapter state pointer.
1953  *	sp:		SRB pointer.
1954  *	pkt:		entry pointer.
1955  *	done_q:		done queue pointer.
1956  *	set_flags:	task daemon flags to set.
1957  *	reset_flags:	task daemon flags to reset.
1958  *
1959  * Returns:
1960  *	BIT_0 = CS_RESET status received.
1961  *
1962  * Context:
1963  *	Interrupt or Kernel context, no mailbox commands allowed.
1964  */
1965 /* ARGSUSED */
1966 static int
1967 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
1968     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1969 {
1970 	uint32_t		sense_sz = 0;
1971 	uint32_t		cnt;
1972 	ql_tgt_t		*tq;
1973 	fcp_rsp_t		*fcpr;
1974 	struct fcp_rsp_info	*rsp;
1975 	int			rval = 0;
1976 
1977 	struct {
1978 		uint8_t		*rsp_info;
1979 		uint8_t		*req_sense_data;
1980 		uint32_t	residual_length;
1981 		uint32_t	fcp_residual_length;
1982 		uint32_t	rsp_info_length;
1983 		uint32_t	req_sense_length;
1984 		uint16_t	comp_status;
1985 		uint8_t		state_flags_l;
1986 		uint8_t		state_flags_h;
1987 		uint8_t		scsi_status_l;
1988 		uint8_t		scsi_status_h;
1989 	} sts;
1990 
1991 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1992 
1993 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1994 		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
1995 
1996 		/* Setup status. */
1997 		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1998 		    &pkt24->comp_status);
1999 		sts.scsi_status_l = pkt24->scsi_status_l;
2000 		sts.scsi_status_h = pkt24->scsi_status_h;
2001 
2002 		/* Setup firmware residuals. */
2003 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2004 		    ddi_get32(ha->hba_buf.acc_handle,
2005 		    (uint32_t *)&pkt24->residual_length) : 0;
2006 
2007 		/* Setup FCP residuals. */
2008 		sts.fcp_residual_length = sts.scsi_status_h &
2009 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2010 		    ddi_get32(ha->hba_buf.acc_handle,
2011 		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2012 
2013 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2014 		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
2015 		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2016 
2017 			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2018 			    sts.residual_length,
2019 			    pkt24->fcp_rsp_residual_count);
2020 			sts.scsi_status_h = (uint8_t)
2021 			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
2022 		}
2023 
2024 		/* Setup state flags. */
2025 		sts.state_flags_l = pkt24->state_flags_l;
2026 		sts.state_flags_h = pkt24->state_flags_h;
2027 
2028 		if (sp->fcp->fcp_data_len &&
2029 		    (sts.comp_status != CS_DATA_UNDERRUN ||
2030 		    sts.residual_length != sp->fcp->fcp_data_len)) {
2031 			sts.state_flags_h = (uint8_t)
2032 			    (sts.state_flags_h | SF_GOT_BUS |
2033 			    SF_GOT_TARGET | SF_SENT_CMD |
2034 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2035 		} else {
2036 			sts.state_flags_h = (uint8_t)
2037 			    (sts.state_flags_h | SF_GOT_BUS |
2038 			    SF_GOT_TARGET | SF_SENT_CMD |
2039 			    SF_GOT_STATUS);
2040 		}
2041 		if (sp->fcp->fcp_cntl.cntl_write_data) {
2042 			sts.state_flags_l = (uint8_t)
2043 			    (sts.state_flags_l | SF_DATA_OUT);
2044 		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
2045 			sts.state_flags_l = (uint8_t)
2046 			    (sts.state_flags_l | SF_DATA_IN);
2047 		}
2048 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2049 			sts.state_flags_l = (uint8_t)
2050 			    (sts.state_flags_l | SF_HEAD_OF_Q);
2051 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2052 			sts.state_flags_l = (uint8_t)
2053 			    (sts.state_flags_l | SF_ORDERED_Q);
2054 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2055 			sts.state_flags_l = (uint8_t)
2056 			    (sts.state_flags_l | SF_SIMPLE_Q);
2057 		}
2058 
2059 		/* Setup FCP response info. */
2060 		sts.rsp_info = &pkt24->rsp_sense_data[0];
2061 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2062 			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2063 			    (uint32_t *)&pkt24->fcp_rsp_data_length);
2064 			if (sts.rsp_info_length >
2065 			    sizeof (struct fcp_rsp_info)) {
2066 				sts.rsp_info_length =
2067 				    sizeof (struct fcp_rsp_info);
2068 			}
2069 			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2070 				ql_chg_endian(sts.rsp_info + cnt, 4);
2071 			}
2072 		} else {
2073 			sts.rsp_info_length = 0;
2074 		}
2075 
2076 		/* Setup sense data. */
2077 		sts.req_sense_data =
2078 		    &pkt24->rsp_sense_data[sts.rsp_info_length];
2079 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2080 			sts.req_sense_length =
2081 			    ddi_get32(ha->hba_buf.acc_handle,
2082 			    (uint32_t *)&pkt24->fcp_sense_length);
2083 			sts.state_flags_h = (uint8_t)
2084 			    (sts.state_flags_h | SF_ARQ_DONE);
2085 			sense_sz = (uint32_t)
2086 			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2087 			    (uintptr_t)sts.req_sense_data);
2088 			for (cnt = 0; cnt < sense_sz; cnt += 4) {
2089 				ql_chg_endian(sts.req_sense_data + cnt, 4);
2090 			}
2091 		} else {
2092 			sts.req_sense_length = 0;
2093 		}
2094 	} else {
2095 		/* Setup status. */
2096 		sts.comp_status = (uint16_t)ddi_get16(
2097 		    ha->hba_buf.acc_handle, &pkt23->comp_status);
2098 		sts.scsi_status_l = pkt23->scsi_status_l;
2099 		sts.scsi_status_h = pkt23->scsi_status_h;
2100 
2101 		/* Setup firmware residuals. */
2102 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2103 		    ddi_get32(ha->hba_buf.acc_handle,
2104 		    (uint32_t *)&pkt23->residual_length) : 0;
2105 
2106 		/* Setup FCP residuals. */
2107 		sts.fcp_residual_length = sts.scsi_status_h &
2108 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2109 		    sts.residual_length : 0;
2110 
2111 		/* Setup state flags. */
2112 		sts.state_flags_l = pkt23->state_flags_l;
2113 		sts.state_flags_h = pkt23->state_flags_h;
2114 
2115 		/* Setup FCP response info. */
2116 		sts.rsp_info = &pkt23->rsp_info[0];
2117 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2118 			sts.rsp_info_length = ddi_get16(
2119 			    ha->hba_buf.acc_handle,
2120 			    (uint16_t *)&pkt23->rsp_info_length);
2121 			if (sts.rsp_info_length >
2122 			    sizeof (struct fcp_rsp_info)) {
2123 				sts.rsp_info_length =
2124 				    sizeof (struct fcp_rsp_info);
2125 			}
2126 		} else {
2127 			sts.rsp_info_length = 0;
2128 		}
2129 
2130 		/* Setup sense data. */
2131 		sts.req_sense_data = &pkt23->req_sense_data[0];
2132 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2133 		    ddi_get16(ha->hba_buf.acc_handle,
2134 		    (uint16_t *)&pkt23->req_sense_length) : 0;
2135 	}
2136 
2137 	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2138 
2139 	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2140 	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2141 	    sizeof (fcp_rsp_t));
2142 
2143 	tq = sp->lun_queue->target_queue;
2144 
2145 	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2146 	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2147 		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2148 	}
2149 	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2150 		fcpr->fcp_u.fcp_status.sense_len_set = 1;
2151 	}
2152 	if (sts.scsi_status_h & FCP_RESID_OVER) {
2153 		fcpr->fcp_u.fcp_status.resid_over = 1;
2154 	}
2155 	if (sts.scsi_status_h & FCP_RESID_UNDER) {
2156 		fcpr->fcp_u.fcp_status.resid_under = 1;
2157 	}
2158 	fcpr->fcp_u.fcp_status.reserved_1 = 0;
2159 
2160 	/* Set ISP completion status */
2161 	sp->pkt->pkt_reason = sts.comp_status;
2162 
2163 	/* Update statistics. */
2164 	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2165 	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2166 
2167 		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2168 		if (sense_sz > sts.rsp_info_length) {
2169 			sense_sz = sts.rsp_info_length;
2170 		}
2171 
2172 		/* copy response information data. */
2173 		if (sense_sz) {
2174 			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2175 			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2176 		}
2177 		fcpr->fcp_response_len = sense_sz;
2178 
2179 		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2180 		    fcpr->fcp_response_len);
2181 
2182 		switch (*(sts.rsp_info + 3)) {
2183 		case FCP_NO_FAILURE:
2184 			break;
2185 		case FCP_DL_LEN_MISMATCH:
2186 			ha->adapter_stats->d_stats[lobyte(
2187 			    tq->loop_id)].dl_len_mismatches++;
2188 			break;
2189 		case FCP_CMND_INVALID:
2190 			break;
2191 		case FCP_DATA_RO_MISMATCH:
2192 			ha->adapter_stats->d_stats[lobyte(
2193 			    tq->loop_id)].data_ro_mismatches++;
2194 			break;
2195 		case FCP_TASK_MGMT_NOT_SUPPTD:
2196 			break;
2197 		case FCP_TASK_MGMT_FAILED:
2198 			ha->adapter_stats->d_stats[lobyte(
2199 			    tq->loop_id)].task_mgmt_failures++;
2200 			break;
2201 		default:
2202 			break;
2203 		}
2204 	} else {
2205 		/*
2206 		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2207 		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
2208 		 */
2209 		fcpr->fcp_response_len = 0;
2210 	}
2211 
2212 	/* Set reset status received. */
2213 	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2214 		rval |= BIT_0;
2215 	}
2216 
2217 	if (!(tq->flags & TQF_TAPE_DEVICE) &&
2218 	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2219 	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2220 	    ha->task_daemon_flags & LOOP_DOWN) {
2221 		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2222 		    tq->d_id.b24, sp->lun_queue->lun_no);
2223 
2224 		/* Set retry status. */
2225 		sp->flags |= SRB_RETRY;
2226 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2227 	    tq->port_down_retry_count != 0 &&
2228 	    (sts.comp_status == CS_INCOMPLETE ||
2229 	    sts.comp_status == CS_PORT_UNAVAILABLE ||
2230 	    sts.comp_status == CS_PORT_LOGGED_OUT ||
2231 	    sts.comp_status == CS_PORT_CONFIG_CHG ||
2232 	    sts.comp_status == CS_PORT_BUSY)) {
2233 		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2234 		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2235 		    tq->port_down_retry_count);
2236 
2237 		/* Set retry status. */
2238 		sp->flags |= SRB_RETRY;
2239 
2240 		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2241 			/* Acquire device queue lock. */
2242 			DEVICE_QUEUE_LOCK(tq);
2243 
2244 			tq->flags |= TQF_QUEUE_SUSPENDED;
2245 
2246 			/* Decrement port down count. */
2247 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2248 				tq->port_down_retry_count--;
2249 			}
2250 
2251 			DEVICE_QUEUE_UNLOCK(tq);
2252 
2253 			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2254 			    == 0 &&
2255 			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2256 			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2257 				sp->ha->adapter_stats->d_stats[lobyte(
2258 				    tq->loop_id)].logouts_recvd++;
2259 				ql_send_logo(sp->ha, tq, done_q);
2260 			}
2261 
2262 			ADAPTER_STATE_LOCK(ha);
2263 			if (ha->port_retry_timer == 0) {
2264 				if ((ha->port_retry_timer =
2265 				    ha->port_down_retry_delay) == 0) {
2266 					*set_flags |=
2267 					    PORT_RETRY_NEEDED;
2268 				}
2269 			}
2270 			ADAPTER_STATE_UNLOCK(ha);
2271 		}
2272 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2273 	    (sts.comp_status == CS_RESET ||
2274 	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2275 	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2276 		if (sts.comp_status == CS_RESET) {
2277 			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2278 			    tq->d_id.b24, sp->lun_queue->lun_no);
2279 		} else if (sts.comp_status == CS_QUEUE_FULL) {
2280 			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2281 			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2282 			    tq->qfull_retry_count);
2283 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2284 				tq->flags |= TQF_QUEUE_SUSPENDED;
2285 
2286 				tq->qfull_retry_count--;
2287 
2288 				ADAPTER_STATE_LOCK(ha);
2289 				if (ha->port_retry_timer == 0) {
2290 					if ((ha->port_retry_timer =
2291 					    ha->qfull_retry_delay) ==
2292 					    0) {
2293 						*set_flags |=
2294 						    PORT_RETRY_NEEDED;
2295 					}
2296 				}
2297 				ADAPTER_STATE_UNLOCK(ha);
2298 			}
2299 		} else {
2300 			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2301 			    tq->d_id.b24, sp->lun_queue->lun_no);
2302 		}
2303 
2304 		/* Set retry status. */
2305 		sp->flags |= SRB_RETRY;
2306 	} else {
2307 		fcpr->fcp_resid =
2308 		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2309 		    sp->fcp->fcp_data_len : sts.fcp_residual_length;
2310 
2311 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2312 		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2313 
2314 			if (sts.scsi_status_l == STATUS_CHECK) {
2315 				sp->pkt->pkt_reason = CS_COMPLETE;
2316 			} else {
2317 				EL(ha, "transport error - "
2318 				    "underrun & invalid resid\n");
2319 				EL(ha, "ssh=%xh, ssl=%xh\n",
2320 				    sts.scsi_status_h, sts.scsi_status_l);
2321 				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2322 			}
2323 		}
2324 
2325 		/* Ignore firmware underrun error. */
2326 		if (sts.comp_status == CS_DATA_UNDERRUN &&
2327 		    (sts.scsi_status_h & FCP_RESID_UNDER ||
2328 		    (sts.scsi_status_l != STATUS_CHECK &&
2329 		    sts.scsi_status_l != STATUS_GOOD))) {
2330 			sp->pkt->pkt_reason = CS_COMPLETE;
2331 		}
2332 
2333 		if (sp->pkt->pkt_reason != CS_COMPLETE) {
2334 			ha->xioctl->DeviceErrorCount++;
2335 			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2336 			    "\n", sts.comp_status, tq->d_id.b24,
2337 			    sp->lun_queue->lun_no);
2338 		}
2339 
2340 		/* Set target request sense data. */
2341 		if (sts.scsi_status_l == STATUS_CHECK) {
2342 			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2343 
2344 				if (sp->pkt->pkt_reason == CS_COMPLETE &&
2345 				    sts.req_sense_data[2] != KEY_NO_SENSE &&
2346 				    sts.req_sense_data[2] !=
2347 				    KEY_UNIT_ATTENTION) {
2348 					ha->xioctl->DeviceErrorCount++;
2349 				}
2350 
2351 				sense_sz = sts.req_sense_length;
2352 
2353 				/* Insure data does not exceed buf. */
2354 				if (sp->pkt->pkt_rsplen <=
2355 				    (uint32_t)sizeof (fcp_rsp_t) +
2356 				    fcpr->fcp_response_len) {
2357 					sp->request_sense_length = 0;
2358 				} else {
2359 					sp->request_sense_length = (uint32_t)
2360 					    (sp->pkt->pkt_rsplen -
2361 					    sizeof (fcp_rsp_t) -
2362 					    fcpr->fcp_response_len);
2363 				}
2364 
2365 				if (sense_sz <
2366 				    sp->request_sense_length) {
2367 					sp->request_sense_length =
2368 					    sense_sz;
2369 				}
2370 
2371 				sp->request_sense_ptr = (caddr_t)rsp;
2372 
2373 				sense_sz = (uint32_t)
2374 				    (((uintptr_t)pkt23 +
2375 				    sizeof (sts_entry_t)) -
2376 				    (uintptr_t)sts.req_sense_data);
2377 				if (sp->request_sense_length <
2378 				    sense_sz) {
2379 					sense_sz =
2380 					    sp->request_sense_length;
2381 				}
2382 
2383 				fcpr->fcp_sense_len = sense_sz;
2384 
2385 				/* Move sense data. */
2386 				ddi_rep_get8(ha->hba_buf.acc_handle,
2387 				    (uint8_t *)sp->request_sense_ptr,
2388 				    sts.req_sense_data,
2389 				    (size_t)sense_sz,
2390 				    DDI_DEV_AUTOINCR);
2391 
2392 				sp->request_sense_ptr += sense_sz;
2393 				sp->request_sense_length -= sense_sz;
2394 				if (sp->request_sense_length != 0) {
2395 					ha->status_srb = sp;
2396 				}
2397 			}
2398 
2399 			if (sense_sz != 0) {
2400 				EL(sp->ha, "check condition sense data, "
2401 				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2402 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2403 				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2404 				    sp->lun_queue->lun_no,
2405 				    sts.req_sense_data[0],
2406 				    sts.req_sense_data[1],
2407 				    sts.req_sense_data[2],
2408 				    sts.req_sense_data[3],
2409 				    sts.req_sense_data[4],
2410 				    sts.req_sense_data[5],
2411 				    sts.req_sense_data[6],
2412 				    sts.req_sense_data[7],
2413 				    sts.req_sense_data[8],
2414 				    sts.req_sense_data[9],
2415 				    sts.req_sense_data[10],
2416 				    sts.req_sense_data[11],
2417 				    sts.req_sense_data[12],
2418 				    sts.req_sense_data[13],
2419 				    sts.req_sense_data[14],
2420 				    sts.req_sense_data[15],
2421 				    sts.req_sense_data[16],
2422 				    sts.req_sense_data[17]);
2423 			} else {
2424 				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2425 				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2426 			}
2427 		}
2428 	}
2429 
2430 	/* Set completed status. */
2431 	sp->flags |= SRB_ISP_COMPLETED;
2432 
2433 	/* Place command on done queue. */
2434 	if (ha->status_srb == NULL) {
2435 		ql_add_link_b(done_q, &sp->cmd);
2436 	}
2437 
2438 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2439 
2440 	return (rval);
2441 }
2442 
2443 /*
2444  * ql_status_cont_entry
2445  *	Processes status continuation entry.
2446  *
2447  * Input:
2448  *	ha:		adapter state pointer.
2449  *	pkt:		entry pointer.
2450  *	done_q:		done queue pointer.
2451  *	set_flags:	task daemon flags to set.
2452  *	reset_flags:	task daemon flags to reset.
2453  *
2454  * Context:
2455  *	Interrupt or Kernel context, no mailbox commands allowed.
2456  */
2457 /* ARGSUSED */
2458 static void
2459 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2460     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2461 {
2462 	uint32_t	sense_sz, index;
2463 	ql_srb_t	*sp = ha->status_srb;
2464 
2465 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2466 
2467 	if (sp != NULL && sp->request_sense_length) {
2468 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2469 			sense_sz = sizeof (pkt->req_sense_data);
2470 		} else {
2471 			sense_sz = sp->request_sense_length;
2472 		}
2473 
2474 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2475 			for (index = 0; index < sense_sz; index += 4) {
2476 				ql_chg_endian((uint8_t *)
2477 				    &pkt->req_sense_data[0] + index, 4);
2478 			}
2479 		}
2480 
2481 		/* Move sense data. */
2482 		ddi_rep_get8(ha->hba_buf.acc_handle,
2483 		    (uint8_t *)sp->request_sense_ptr,
2484 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2485 		    DDI_DEV_AUTOINCR);
2486 
2487 		sp->request_sense_ptr += sense_sz;
2488 		sp->request_sense_length -= sense_sz;
2489 
2490 		/* Place command on done queue. */
2491 		if (sp->request_sense_length == 0) {
2492 			ql_add_link_b(done_q, &sp->cmd);
2493 			ha->status_srb = NULL;
2494 		}
2495 	}
2496 
2497 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2498 }
2499 
2500 /*
2501  * ql_ip_entry
2502  *	Processes received ISP IP entry.
2503  *
2504  * Input:
2505  *	ha:		adapter state pointer.
2506  *	pkt:		entry pointer.
2507  *	done_q:		done queue pointer.
2508  *	set_flags:	task daemon flags to set.
2509  *	reset_flags:	task daemon flags to reset.
2510  *
2511  * Context:
2512  *	Interrupt or Kernel context, no mailbox commands allowed.
2513  */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, cnt;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	/* The full 32-bit handle must match, not just the index bits. */
	if (sp != NULL && sp->handle == cnt) {
		/* Firmware is done with this IOCB; release the slot. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			/* 24xx-family layout carries status in hdl_status. */
			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			/* Loop is down; defer rather than fail the I/O. */
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			/* Transient port-level failure with retries left. */
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Remote port dropped us; re-login via LOGO. */
			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* First failure suspends the queue and arms retry. */
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Zero delay means retry right now. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			/* Terminal status; just log non-success. */
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	} else {
		/* Stale or corrupt handle: capture state and reset the ISP. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		/* Only request an abort if one is not already in flight. */
		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2635 
2636 /*
2637  * ql_ip_rcv_entry
2638  *	Processes received ISP IP buffers entry.
2639  *
2640  * Input:
2641  *	ha:		adapter state pointer.
2642  *	pkt:		entry pointer.
2643  *	done_q:		done queue pointer.
2644  *	set_flags:	task daemon flags to set.
2645  *	reset_flags:	task daemon flags to reset.
2646  *
2647  * Context:
2648  *	Interrupt or Kernel context, no mailbox commands allowed.
2649  */
2650 /* ARGSUSED */
2651 static void
2652 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2653     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2654 {
2655 	port_id_t	s_id;
2656 	uint16_t	index;
2657 	uint8_t		cnt;
2658 	ql_tgt_t	*tq;
2659 
2660 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2661 
2662 	/* Locate device queue. */
2663 	s_id.b.al_pa = pkt->s_id[0];
2664 	s_id.b.area = pkt->s_id[1];
2665 	s_id.b.domain = pkt->s_id[2];
2666 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2667 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2668 		return;
2669 	}
2670 
2671 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2672 	    &pkt->seq_length);
2673 	tq->ub_total_seg_cnt = pkt->segment_count;
2674 	tq->ub_seq_id = ++ha->ub_seq_id;
2675 	tq->ub_seq_cnt = 0;
2676 	tq->ub_frame_ro = 0;
2677 	tq->ub_loop_id = pkt->loop_id;
2678 	ha->rcv_dev_q = tq;
2679 
2680 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2681 	    tq->ub_total_seg_cnt; cnt++) {
2682 
2683 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2684 		    &pkt->buffer_handle[cnt]);
2685 
2686 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2687 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2688 			*set_flags |= ISP_ABORT_NEEDED;
2689 			break;
2690 		}
2691 	}
2692 
2693 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2694 }
2695 
2696 /*
2697  * ql_ip_rcv_cont_entry
2698  *	Processes received ISP IP buffers continuation entry.
2699  *
2700  * Input:
2701  *	ha:		adapter state pointer.
2702  *	pkt:		entry pointer.
2703  *	done_q:		done queue pointer.
2704  *	set_flags:	task daemon flags to set.
2705  *	reset_flags:	task daemon flags to reset.
2706  *
2707  * Context:
2708  *	Interrupt or Kernel context, no mailbox commands allowed.
2709  */
2710 /* ARGSUSED */
2711 static void
2712 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2713     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2714 {
2715 	uint16_t	index;
2716 	uint8_t		cnt;
2717 	ql_tgt_t	*tq;
2718 
2719 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2720 
2721 	if ((tq = ha->rcv_dev_q) == NULL) {
2722 		EL(ha, "No IP receive device\n");
2723 		return;
2724 	}
2725 
2726 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2727 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2728 
2729 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2730 		    &pkt->buffer_handle[cnt]);
2731 
2732 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2733 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2734 			*set_flags |= ISP_ABORT_NEEDED;
2735 			break;
2736 		}
2737 	}
2738 
2739 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2740 }
2741 
2742 /*
 * ql_ip_24xx_rcv_entry
2744  *	Processes received ISP24xx IP buffers entry.
2745  *
2746  * Input:
2747  *	ha:		adapter state pointer.
2748  *	pkt:		entry pointer.
2749  *	done_q:		done queue pointer.
2750  *	set_flags:	task daemon flags to set.
2751  *	reset_flags:	task daemon flags to reset.
2752  *
2753  * Context:
2754  *	Interrupt or Kernel context, no mailbox commands allowed.
2755  */
2756 /* ARGSUSED */
2757 static void
2758 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2759     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2760 {
2761 	port_id_t	s_id;
2762 	uint16_t	index;
2763 	uint8_t		cnt;
2764 	ql_tgt_t	*tq;
2765 
2766 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2767 
2768 	/* Locate device queue. */
2769 	s_id.b.al_pa = pkt->s_id[0];
2770 	s_id.b.area = pkt->s_id[1];
2771 	s_id.b.domain = pkt->s_id[2];
2772 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2773 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2774 		return;
2775 	}
2776 
2777 	if (tq->ub_total_seg_cnt == 0) {
2778 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2779 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2780 		tq->ub_total_seg_cnt = pkt->segment_count;
2781 		tq->ub_seq_id = ++ha->ub_seq_id;
2782 		tq->ub_seq_cnt = 0;
2783 		tq->ub_frame_ro = 0;
2784 		tq->ub_loop_id = (uint16_t)ddi_get16(
2785 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2786 	}
2787 
2788 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2789 	    tq->ub_total_seg_cnt; cnt++) {
2790 
2791 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2792 		    &pkt->buffer_handle[cnt]);
2793 
2794 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2795 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2796 			*set_flags |= ISP_ABORT_NEEDED;
2797 			break;
2798 		}
2799 	}
2800 
2801 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2802 }
2803 
2804 /*
2805  * ql_ms_entry
2806  *	Processes received Name/Management/CT Pass-Through entry.
2807  *
2808  * Input:
2809  *	ha:		adapter state pointer.
2810  *	pkt23:		entry pointer.
2811  *	done_q:		done queue pointer.
2812  *	set_flags:	task daemon flags to set.
2813  *	reset_flags:	task daemon flags to reset.
2814  *
2815  * Context:
2816  *	Interrupt or Kernel context, no mailbox commands allowed.
2817  */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t		*sp;
	uint32_t		index, cnt;
	ql_tgt_t		*tq;
	/* Same IOCB reinterpreted with the 24xx-family layout. */
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get handle. */
	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = cnt & OSC_INDEX_MASK;

	/* Validate handle. */
	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
	    NULL;

	/* The full 32-bit handle must match, not just the index bits. */
	if (sp != NULL && sp->handle == cnt) {
		/* A non-MS SRB here means the handle maps to the wrong cmd. */
		if (!(sp->flags & SRB_MS_PKT)) {
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		/* Firmware is done with this IOCB; release the slot. */
		ha->outstanding_cmds[index] = NULL;
		sp->handle = 0;
		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_242581)) {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		/*
		 * NOTE(review): "Resouce" spelling below matches the
		 * CS_RESOUCE_UNAVAILABLE constant from the driver headers.
		 */
		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/* First failure suspends the queue and arms retry. */
			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Fixed 2-tick delay before retry. */
					ha->port_retry_timer = 2;
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					/* Zero delay means retry right now. */
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_CTRL_242581) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/*
			 * A short CT response is still usable as long as the
			 * CT header arrived intact.
			 */
			cnt = ddi_get32(ha->hba_buf.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
			    ha->instance, sp->pkt->pkt_cmd[8],
			    sp->pkt->pkt_cmd[9]);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		if ((sp->flags & SRB_RETRY) == 0) {
			/*
			 * Restore CT header byte order: the command buffer
			 * for the name server (d_id 0xfffffc), the response
			 * buffer for everything else.
			 */
			tq->d_id.b24 == 0xfffffc ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	} else {
		/* Stale or corrupt handle: capture state and reset the ISP. */
		if (sp == NULL) {
			EL(ha, "unknown IOCB handle=%xh\n", cnt);
		} else {
			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
			    cnt, sp->handle);
		}

		(void) ql_binary_fw_dump(ha, FALSE);

		/* Only request an abort if one is not already in flight. */
		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE))) {
			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
			*set_flags |= ISP_ABORT_NEEDED;
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2977 
2978 /*
2979  * ql_report_id_entry
 *	Processes received Report ID Acquisition entry.
2981  *
2982  * Input:
2983  *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
2985  *	done_q:		done queue pointer.
2986  *	set_flags:	task daemon flags to set.
2987  *	reset_flags:	task daemon flags to reset.
2988  *
2989  * Context:
2990  *	Interrupt or Kernel context, no mailbox commands allowed.
2991  */
2992 /* ARGSUSED */
2993 static void
2994 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
2995     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2996 {
2997 	ql_adapter_state_t	*vha;
2998 
2999 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3000 
3001 	EL(ha, "format=%d, vp=%d, status=%d\n",
3002 	    pkt->format, pkt->vp_index, pkt->status);
3003 
3004 	if (pkt->format == 1) {
3005 		/* Locate port state structure. */
3006 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3007 			if (vha->vp_index == pkt->vp_index) {
3008 				break;
3009 			}
3010 		}
3011 		if (vha != NULL && vha->vp_index != 0 &&
3012 		    (pkt->status == CS_COMPLETE ||
3013 		    pkt->status == CS_PORT_ID_CHANGE)) {
3014 			*set_flags |= LOOP_RESYNC_NEEDED;
3015 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3016 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3017 			TASK_DAEMON_LOCK(ha);
3018 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3019 			vha->task_daemon_flags &= ~LOOP_DOWN;
3020 			TASK_DAEMON_UNLOCK(ha);
3021 		}
3022 	}
3023 
3024 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3025 }
3026 
3027 /*
 * ql_els_passthru_entry
3029  *	Processes received ELS Pass-Through entry.
3030  *
3031  * Input:
3032  *	ha:		adapter state pointer.
3033  *	pkt23:		entry pointer.
3034  *	done_q:		done queue pointer.
3035  *	set_flags:	task daemon flags to set.
3036  *	reset_flags:	task daemon flags to reset.
3037  *
3038  * Context:
3039  *	Interrupt or Kernel context, no mailbox commands allowed.
3040  */
3041 /* ARGSUSED */
3042 static void
3043 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3044     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3045 {
3046 	ql_tgt_t	*tq;
3047 	port_id_t	d_id, s_id;
3048 	ql_srb_t	*srb;
3049 	uint32_t	cnt, index;
3050 
3051 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3052 	/* Get handle. */
3053 	cnt = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3054 	index = cnt & OSC_INDEX_MASK;
3055 
3056 	/* Validate handle. */
3057 	srb = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
3058 	    NULL;
3059 
3060 	(void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3061 	    DDI_DMA_SYNC_FORKERNEL);
3062 
3063 	if (srb != NULL && srb->handle == cnt) {
3064 		if (!(srb->flags & SRB_ELS_PKT)) {
3065 			EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3066 			    srb->flags);
3067 			*set_flags |= ISP_ABORT_NEEDED;
3068 			return;
3069 		}
3070 		ha->outstanding_cmds[index] = NULL;
3071 		srb->handle = 0;
3072 		srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3073 
3074 		/* Set ISP completion status */
3075 		srb->pkt->pkt_reason = ddi_get16(
3076 		    ha->hba_buf.acc_handle, &rsp->comp_status);
3077 
3078 		if (srb->pkt->pkt_reason != CS_COMPLETE) {
3079 			la_els_rjt_t	rjt;
3080 			EL(ha, "status err=%xh\n", srb->pkt->pkt_reason);
3081 
3082 			if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3083 				EL(ha, "e1=%xh e2=%xh\n",
3084 				    rsp->error_subcode1, rsp->error_subcode2);
3085 			}
3086 
3087 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3088 
3089 			/* Build RJT in the response. */
3090 			rjt.ls_code.ls_code = LA_ELS_RJT;
3091 			rjt.reason = FC_REASON_NO_CONNECTION;
3092 
3093 			ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3094 			    (uint8_t *)srb->pkt->pkt_resp,
3095 			    sizeof (rjt), DDI_DEV_AUTOINCR);
3096 
3097 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3098 			srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3099 		}
3100 
3101 		if (srb->pkt->pkt_reason == CS_COMPLETE) {
3102 			uint8_t		opcode;
3103 			uint16_t	loop_id;
3104 
3105 			/* Indicate ISP completion */
3106 			srb->flags |= SRB_ISP_COMPLETED;
3107 
3108 			loop_id = ddi_get16(ha->hba_buf.acc_handle,
3109 			    &rsp->n_port_hdl);
3110 
3111 			if (ha->topology & QL_N_PORT) {
3112 				/* create a target Q if there isn't one */
3113 				tq = ql_loop_id_to_queue(ha, loop_id);
3114 				if (tq == NULL) {
3115 					d_id.b.al_pa = rsp->d_id_7_0;
3116 					d_id.b.area = rsp->d_id_15_8;
3117 					d_id.b.domain = rsp->d_id_23_16;
3118 					/* Acquire adapter state lock. */
3119 					ADAPTER_STATE_LOCK(ha);
3120 
3121 					tq = ql_dev_init(ha, d_id, loop_id);
3122 					EL(ha, " tq = %x\n", tq);
3123 
3124 					ADAPTER_STATE_UNLOCK(ha);
3125 				}
3126 
3127 				/* on plogi success assume the chosen s_id */
3128 				opcode = ddi_get8(ha->hba_buf.acc_handle,
3129 				    &rsp->els_cmd_opcode);
3130 
3131 				EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n",
3132 				    opcode, srb->pkt);
3133 
3134 				if (opcode == LA_ELS_PLOGI) {
3135 					s_id.b.al_pa = rsp->s_id_7_0;
3136 					s_id.b.area = rsp->s_id_15_8;
3137 					s_id.b.