1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2010 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_nx.h>
52 #include <ql_xioctl.h>
53 
54 /*
55  * Local Function Prototypes.
56  */
57 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
58     uint32_t *);
59 static void ql_spurious_intr(ql_adapter_state_t *, int);
60 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
61     uint32_t *, int);
62 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
63     uint32_t *, uint32_t *, int);
64 static void ql_fast_fcp_post(ql_srb_t *);
65 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
66     uint32_t *, int);
67 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
68     uint32_t *, uint32_t *);
69 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
70     uint32_t *, uint32_t *);
71 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
72     ql_head_t *, uint32_t *, uint32_t *);
73 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
74     ql_head_t *, uint32_t *, uint32_t *);
75 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
76     ql_head_t *, uint32_t *, uint32_t *);
77 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
78     uint32_t *, uint32_t *);
79 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
80     ql_head_t *, uint32_t *, uint32_t *);
81 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
82     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
83 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
84     ql_head_t *, uint32_t *, uint32_t *);
85 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
86     uint32_t *, uint32_t *);
87 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
88     ql_head_t *, uint32_t *, uint32_t *);
89 static void ql_els_passthru_entry(ql_adapter_state_t *,
90     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
91 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
92     uint32_t *, uint32_t *);
93 static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
94 
95 /*
96  * ql_isr
97  *	Process all INTX intr types.
98  *
99  * Input:
100  *	arg1:	adapter state pointer.
101  *
102  * Returns:
103  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
104  *
105  * Context:
106  *	Interrupt or Kernel context, no mailbox commands allowed.
107  */
108 /* ARGSUSED */
109 uint_t
110 ql_isr(caddr_t arg1)
111 {
112 	return (ql_isr_aif(arg1, 0));
113 }
114 
115 /*
116  * ql_isr_default
117  *	Process unknown/unvectored intr types
118  *
119  * Input:
120  *	arg1:	adapter state pointer.
121  *	arg2:	interrupt vector.
122  *
123  * Returns:
124  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
125  *
126  * Context:
127  *	Interrupt or Kernel context, no mailbox commands allowed.
128  */
129 /* ARGSUSED */
130 uint_t
131 ql_isr_default(caddr_t arg1, caddr_t arg2)
132 {
133 	ql_adapter_state_t	*ha = (void *)arg1;
134 
135 	EL(ha, "isr_default called: idx=%x\n", arg2);
136 	return (ql_isr_aif(arg1, arg2));
137 }
138 
139 /*
140  * ql_isr_aif
141  *	Process mailbox and I/O command completions.
142  *
143  * Input:
144  *	arg:	adapter state pointer.
145  *	intvec:	interrupt vector.
146  *
147  * Returns:
148  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
149  *
150  * Context:
151  *	Interrupt or Kernel context, no mailbox commands allowed.
152  */
153 /* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;	/* task daemon flags to set */
	uint32_t		reset_flags = 0;	/* task daemon flags to clear */
	ql_head_t		isr_done_q = {NULL, NULL};	/* completed cmds */
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	int			intr_loop = 4;	/* early-clear budget, see below */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	/* Hold off power-down while this interrupt is being serviced. */
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_2200)) {
		/* 2200-class path: poll istatus until RISC_INT deasserts. */
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			/*
			 * While intr_loop is non-zero the handlers clear the
			 * RISC interrupt early (intr_loop is passed down as
			 * intr_clr); once it reaches zero the clear is done
			 * at the bottom of this loop instead.
			 */
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				/* Mailbox 23 carries fast-post status. */
				mbx = RD16_IO_REG(ha, mailbox_out[23]);

				/* Low two bits identify a SCSI completion. */
				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					/* Low 12 bits of handle must match. */
					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						/*
						 * Handle matches no
						 * outstanding command: dump
						 * firmware and request an ISP
						 * abort unless one is already
						 * in progress.
						 */
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			/* No fast post consumed; classify the interrupt. */
			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox_out[0]);
					/* 4000h-7fffh: mailbox completion. */
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					/* 8000h-bfffh: async event. */
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					/* No mailbox; check response queue. */
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Fold accumulated flag changes into task daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* All later chip families use the risc2host register. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
		}
		while ((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) {
			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;

			/* 8021: stop if the firmware interrupt is gone. */
			if (CFG_IST(ha, CFG_CTRL_8021) &&
			    RD32_IO_REG(ha, nx_risc_int) == 0) {
				break;
			}

			if (intr_loop) {
				intr_loop--;
			}

			/* Dispatch on the low nine status bits. */
			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				/* New response-queue in-pointer is in mbx. */
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    MAX_SPURIOUS_INTR) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
				} else {
					intr = B_TRUE;
				}
				break;

			/*
			 * Fast-post cases: rewrite the low word of stat as
			 * the equivalent async event code and reuse the
			 * async event handler (handle stays in the high word).
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
				} else {
					WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
				}
			}

			/* Fold accumulated flag changes into task daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/* PARITY_ERROR is set by the uncommon-intr handler. */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
				break;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	/* Wake the task daemon only after dropping the interrupt lock. */
	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Complete all commands queued by the handlers above. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	/* Drop the power-management busy reference taken on entry. */
	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}
527 
528 /*
529  * ql_handle_uncommon_risc_intr
530  *	Handle an uncommon RISC interrupt.
531  *
532  * Input:
533  *	ha:		adapter state pointer.
534  *	stat:		interrupt status
535  *
536  * Context:
537  *	Interrupt or Kernel context, no mailbox commands allowed.
538  */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
    uint32_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/*
	 * A paused RISC (status) or any error bit in hccr is treated as an
	 * internal parity/pause error; anything else is logged as unknown.
	 */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Warn only on the first error or when the register
		 * signature differs from the previously logged one,
		 * to avoid flooding the console.
		 */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Capture a firmware dump; reset the chip if that fails. */
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating error counter. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		CFG_IST(ha, CFG_CTRL_8021) ? ql_8021_disable_intrs(ha) :
		    WRT16_IO_REG(ha, ictrl, 0);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
592 
593 /*
594  * ql_spurious_intr
595  *	Inform Solaris of spurious interrupts.
596  *
597  * Input:
598  *	ha:		adapter state pointer.
599  *	intr_clr:	early interrupt clear
600  *
601  * Context:
602  *	Interrupt or Kernel context, no mailbox commands allowed.
603  */
604 static void
605 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
606 {
607 	ddi_devstate_t	state;
608 
609 	EL(ha, "Spurious interrupt\n");
610 
611 	/* Disable ISP interrupts. */
612 	WRT16_IO_REG(ha, ictrl, 0);
613 	ADAPTER_STATE_LOCK(ha);
614 	ha->flags &= ~INTERRUPTS_ENABLED;
615 	ADAPTER_STATE_UNLOCK(ha);
616 
617 	/* Clear RISC interrupt */
618 	if (intr_clr) {
619 		if (CFG_IST(ha, CFG_CTRL_8021)) {
620 			ql_8021_clr_fw_intr(ha);
621 		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
622 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
623 		} else {
624 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
625 		}
626 	}
627 
628 	state = ddi_get_devstate(ha->dip);
629 	if (state == DDI_DEVSTATE_UP) {
630 		/*EMPTY*/
631 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
632 		    DDI_DEVICE_FAULT, "spurious interrupts");
633 	}
634 }
635 
636 /*
637  * ql_mbx_completion
638  *	Processes mailbox completions.
639  *
640  * Input:
641  *	ha:		adapter state pointer.
642  *	mb0:		Mailbox 0 contents.
643  *	set_flags:	task daemon flags to set.
644  *	reset_flags:	task daemon flags to reset.
645  *	intr_clr:	early interrupt clear
646  *
647  * Context:
648  *	Interrupt context.
649  */
650 /* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
    uint32_t *reset_flags, int intr_clr)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		/* A mailbox command is outstanding; copy back its results. */
		ha->mcp->mb[0] = mb0;
		index = ha->mcp->in_mb & ~MBX_0;	/* registers still expected */

		/*
		 * Walk the in_mb bitmap: after the shift, MBX_0 being set
		 * means mailbox register 'cnt' was requested by the caller
		 * and should be read back.  Stops early once no requested
		 * registers remain.
		 */
		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox_out[cnt]);
			}
		}

	} else {
		EL(ha, "mcp == NULL\n");
	}

	if (intr_clr) {
		/* Clear RISC interrupt. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Wake any thread waiting on mailbox command completion. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
699 
700 /*
701  * ql_async_event
702  *	Processes asynchronous events.
703  *
704  * Input:
705  *	ha:		adapter state pointer.
706  *	mbx:		Mailbox 0 register.
707  *	done_q:		head pointer to done queue.
708  *	set_flags:	task daemon flags to set.
709  *	reset_flags:	task daemon flags to reset.
710  *	intr_clr:	early interrupt clear
711  *
712  * Context:
713  *	Interrupt or Kernel context, no mailbox commands allowed.
714  */
715 static void
716 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
717     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
718 {
719 	uint32_t		handle;
720 	uint32_t		index;
721 	uint16_t		cnt;
722 	uint16_t		mb[MAX_MBOX_COUNT];
723 	ql_srb_t		*sp;
724 	port_id_t		s_id;
725 	ql_tgt_t		*tq;
726 	boolean_t		intr = B_TRUE;
727 	ql_adapter_state_t	*vha;
728 
729 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
730 
731 	/* Setup to process fast completion. */
732 	mb[0] = LSW(mbx);
733 	switch (mb[0]) {
734 	case MBA_SCSI_COMPLETION:
735 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
736 		    RD16_IO_REG(ha, mailbox_out[2]));
737 		break;
738 
739 	case MBA_CMPLT_1_16BIT:
740 		handle = MSW(mbx);
741 		mb[0] = MBA_SCSI_COMPLETION;
742 		break;
743 
744 	case MBA_CMPLT_1_32BIT:
745 		handle = SHORT_TO_LONG(MSW(mbx),
746 		    RD16_IO_REG(ha, mailbox_out[2]));
747 		mb[0] = MBA_SCSI_COMPLETION;
748 		break;
749 
750 	case MBA_CTIO_COMPLETION:
751 	case MBA_IP_COMPLETION:
752 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
753 		    RD16_IO_REG(ha, mailbox_out[1]),
754 		    RD16_IO_REG(ha, mailbox_out[2])) :
755 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
756 		mb[0] = MBA_SCSI_COMPLETION;
757 		break;
758 
759 	default:
760 		break;
761 	}
762 
763 	/* Handle asynchronous event */
764 	switch (mb[0]) {
765 	case MBA_SCSI_COMPLETION:
766 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
767 		    ha->instance);
768 
769 		if (intr_clr) {
770 			/* Clear RISC interrupt */
771 			if (CFG_IST(ha, CFG_CTRL_8021)) {
772 				ql_8021_clr_fw_intr(ha);
773 			} else if (CFG_IST(ha, CFG_CTRL_242581)) {
774 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
775 			} else {
776 				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
777 			}
778 			intr = B_FALSE;
779 		}
780 
781 		if ((ha->flags & ONLINE) == 0) {
782 			break;
783 		}
784 
785 		/* Get handle. */
786 		index = handle & OSC_INDEX_MASK;
787 
788 		/* Validate handle. */
789 		sp = index < MAX_OUTSTANDING_COMMANDS ?
790 		    ha->outstanding_cmds[index] : NULL;
791 
792 		if (sp != NULL && sp->handle == handle) {
793 			ha->outstanding_cmds[index] = NULL;
794 			sp->handle = 0;
795 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
796 
797 			/* Set completed status. */
798 			sp->flags |= SRB_ISP_COMPLETED;
799 
800 			/* Set completion status */
801 			sp->pkt->pkt_reason = CS_COMPLETE;
802 
803 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
804 				/* Place block on done queue */
805 				ql_add_link_b(done_q, &sp->cmd);
806 			} else {
807 				ql_fast_fcp_post(sp);
808 			}
809 		} else if (handle != QL_FCA_BRAND) {
810 			if (sp == NULL) {
811 				EL(ha, "%xh unknown IOCB handle=%xh\n",
812 				    mb[0], handle);
813 			} else {
814 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
815 				    "sp=%xh\n", mb[0], handle, sp->handle);
816 			}
817 
818 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
819 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
820 			    RD16_IO_REG(ha, mailbox_out[1]),
821 			    RD16_IO_REG(ha, mailbox_out[2]),
822 			    RD16_IO_REG(ha, mailbox_out[3]),
823 			    RD16_IO_REG(ha, mailbox_out[6]),
824 			    RD16_IO_REG(ha, mailbox_out[7]));
825 
826 			(void) ql_binary_fw_dump(ha, FALSE);
827 
828 			if (!(ha->task_daemon_flags &
829 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
830 				EL(ha, "%xh ISP Invalid handle, "
831 				    "isp_abort_needed\n", mb[0]);
832 				*set_flags |= ISP_ABORT_NEEDED;
833 			}
834 		}
835 		break;
836 
837 	case MBA_RESET:		/* Reset */
838 		EL(ha, "%xh Reset received\n", mb[0]);
839 		*set_flags |= RESET_MARKER_NEEDED;
840 		break;
841 
842 	case MBA_SYSTEM_ERR:		/* System Error */
843 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
844 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
845 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
846 		mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
847 
848 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
849 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
850 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
851 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
852 		    RD16_IO_REG(ha, mailbox_out[4]),
853 		    RD16_IO_REG(ha, mailbox_out[5]),
854 		    RD16_IO_REG(ha, mailbox_out[6]), mb[7],
855 		    RD16_IO_REG(ha, mailbox_out[8]),
856 		    RD16_IO_REG(ha, mailbox_out[9]),
857 		    RD16_IO_REG(ha, mailbox_out[10]),
858 		    RD16_IO_REG(ha, mailbox_out[11]),
859 		    RD16_IO_REG(ha, mailbox_out[12]));
860 
861 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
862 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
863 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
864 		    mb[0], RD16_IO_REG(ha, mailbox_out[13]),
865 		    RD16_IO_REG(ha, mailbox_out[14]),
866 		    RD16_IO_REG(ha, mailbox_out[15]),
867 		    RD16_IO_REG(ha, mailbox_out[16]),
868 		    RD16_IO_REG(ha, mailbox_out[17]),
869 		    RD16_IO_REG(ha, mailbox_out[18]),
870 		    RD16_IO_REG(ha, mailbox_out[19]),
871 		    RD16_IO_REG(ha, mailbox_out[20]),
872 		    RD16_IO_REG(ha, mailbox_out[21]),
873 		    RD16_IO_REG(ha, mailbox_out[22]),
874 		    RD16_IO_REG(ha, mailbox_out[23]));
875 
876 		if (ha->reg_off->mbox_cnt > 24) {
877 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
878 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
879 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
880 			    RD16_IO_REG(ha, mailbox_out[24]),
881 			    RD16_IO_REG(ha, mailbox_out[25]),
882 			    RD16_IO_REG(ha, mailbox_out[26]),
883 			    RD16_IO_REG(ha, mailbox_out[27]),
884 			    RD16_IO_REG(ha, mailbox_out[28]),
885 			    RD16_IO_REG(ha, mailbox_out[29]),
886 			    RD16_IO_REG(ha, mailbox_out[30]),
887 			    RD16_IO_REG(ha, mailbox_out[31]));
888 		}
889 
890 		(void) ql_binary_fw_dump(ha, FALSE);
891 
892 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
893 		    mb[2], mb[3]);
894 
895 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
896 			ADAPTER_STATE_LOCK(ha);
897 			ha->flags |= MPI_RESET_NEEDED;
898 			ADAPTER_STATE_UNLOCK(ha);
899 		}
900 
901 		*set_flags |= ISP_ABORT_NEEDED;
902 		ha->xioctl->ControllerErrorCount++;
903 		break;
904 
905 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
906 		EL(ha, "%xh Request Transfer Error received, "
907 		    "isp_abort_needed\n", mb[0]);
908 
909 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
910 		    RD16_IO_REG(ha, mailbox_out[1]),
911 		    RD16_IO_REG(ha, mailbox_out[2]),
912 		    RD16_IO_REG(ha, mailbox_out[3]));
913 
914 		*set_flags |= ISP_ABORT_NEEDED;
915 		ha->xioctl->ControllerErrorCount++;
916 		break;
917 
918 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
919 		EL(ha, "%xh Response Transfer Error received,"
920 		    " isp_abort_needed\n", mb[0]);
921 
922 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
923 		    RD16_IO_REG(ha, mailbox_out[1]),
924 		    RD16_IO_REG(ha, mailbox_out[2]),
925 		    RD16_IO_REG(ha, mailbox_out[3]));
926 
927 		*set_flags |= ISP_ABORT_NEEDED;
928 		ha->xioctl->ControllerErrorCount++;
929 		break;
930 
931 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
932 		EL(ha, "%xh Request Queue Wake-up received\n",
933 		    mb[0]);
934 		break;
935 
936 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
937 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
938 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
939 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
940 
941 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
942 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
943 
944 		switch (mb[1]) {
945 		case MLA_LOGIN_OPERATIONAL_FW:
946 			ADAPTER_STATE_LOCK(ha);
947 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
948 			ADAPTER_STATE_UNLOCK(ha);
949 			break;
950 		case MLA_PANIC_RECOVERY:
951 		case MLA_LOGIN_DIAGNOSTIC_FW:
952 		case MLA_LOGIN_GOLDEN_FW:
953 		case MLA_REJECT_RESPONSE:
954 		default:
955 			break;
956 		}
957 		break;
958 
959 	case MBA_LIP_F8:	/* Received a LIP F8. */
960 	case MBA_LIP_RESET:	/* LIP reset occurred. */
961 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
962 		if (CFG_IST(ha, CFG_CTRL_8081)) {
963 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
964 			    "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
965 			    RD16_IO_REG(ha, mailbox_out[2]));
966 		} else {
967 			EL(ha, "%xh LIP received\n", mb[0]);
968 		}
969 
970 		ADAPTER_STATE_LOCK(ha);
971 		ha->flags &= ~POINT_TO_POINT;
972 		ADAPTER_STATE_UNLOCK(ha);
973 
974 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
975 			*set_flags |= LOOP_DOWN;
976 		}
977 		ql_port_state(ha, FC_STATE_OFFLINE,
978 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
979 
980 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
981 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
982 		}
983 
984 		ha->adapter_stats->lip_count++;
985 
986 		/* Update AEN queue. */
987 		ha->xioctl->TotalLipResets++;
988 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
989 			ql_enqueue_aen(ha, mb[0], NULL);
990 		}
991 		break;
992 
993 	case MBA_LOOP_UP:
994 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
995 		    CFG_CTRL_24258081))) {
996 			mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
997 			if (mb[1] == IIDMA_RATE_1GB) {		/* 1GB */
998 				ha->state = FC_PORT_STATE_MASK(
999 				    ha->state) | FC_STATE_1GBIT_SPEED;
1000 				index = 1;
1001 			} else if (mb[1] == IIDMA_RATE_2GB) {	/* 2GB */
1002 				ha->state = FC_PORT_STATE_MASK(
1003 				    ha->state) | FC_STATE_2GBIT_SPEED;
1004 				index = 2;
1005 			} else if (mb[1] == IIDMA_RATE_4GB) {	/* 4GB */
1006 				ha->state = FC_PORT_STATE_MASK(
1007 				    ha->state) | FC_STATE_4GBIT_SPEED;
1008 				index = 4;
1009 			} else if (mb[1] == IIDMA_RATE_8GB) {	/* 8GB */
1010 				ha->state = FC_PORT_STATE_MASK(
1011 				    ha->state) | FC_STATE_8GBIT_SPEED;
1012 				index = 8;
1013 			} else if (mb[1] == IIDMA_RATE_10GB) {	/* 10GB */
1014 				ha->state = FC_PORT_STATE_MASK(
1015 				    ha->state) | FC_STATE_10GBIT_SPEED;
1016 				index = 10;
1017 			} else {
1018 				ha->state = FC_PORT_STATE_MASK(
1019 				    ha->state);
1020 				index = 0;
1021 			}
1022 		} else {
1023 			ha->state = FC_PORT_STATE_MASK(ha->state) |
1024 			    FC_STATE_FULL_SPEED;
1025 			index = 1;
1026 		}
1027 
1028 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1029 			vha->state = FC_PORT_STATE_MASK(vha->state) |
1030 			    FC_PORT_SPEED_MASK(ha->state);
1031 		}
1032 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1033 
1034 		/* Update AEN queue. */
1035 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1036 			ql_enqueue_aen(ha, mb[0], NULL);
1037 		}
1038 		break;
1039 
1040 	case MBA_LOOP_DOWN:
1041 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1042 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1043 		    RD16_IO_REG(ha, mailbox_out[2]),
1044 		    RD16_IO_REG(ha, mailbox_out[3]),
1045 		    RD16_IO_REG(ha, mailbox_out[4]));
1046 
1047 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1048 			*set_flags |= LOOP_DOWN;
1049 		}
1050 		ql_port_state(ha, FC_STATE_OFFLINE,
1051 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1052 
1053 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1054 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1055 		}
1056 
1057 		if (CFG_IST(ha, CFG_CTRL_258081)) {
1058 			ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1059 		}
1060 
1061 		/* Update AEN queue. */
1062 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1063 			ql_enqueue_aen(ha, mb[0], NULL);
1064 		}
1065 		break;
1066 
1067 	case MBA_PORT_UPDATE:
1068 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1069 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1070 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1071 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1072 
1073 		/* Locate port state structure. */
1074 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1075 			if (vha->vp_index == LSB(mb[3])) {
1076 				break;
1077 			}
1078 		}
1079 		if (vha == NULL) {
1080 			break;
1081 		}
1082 
1083 		if (CFG_IST(ha, CFG_CTRL_8081) && mb[1] == 0xffff &&
1084 		    mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1085 		    MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1086 		    MSB(mb[3]) == 0x1e)) {
1087 			/*
1088 			 * received FLOGI reject
1089 			 * received FLOGO
1090 			 * FCF configuration changed
1091 			 * FIP Clear Virtual Link received
1092 			 * FKA timeout
1093 			 */
1094 			if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1095 				*set_flags |= LOOP_DOWN;
1096 			}
1097 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1098 			    COMMAND_WAIT_NEEDED | LOOP_DOWN);
1099 			if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1100 				ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1101 			}
1102 		/*
1103 		 * In N port 2 N port topology the FW provides a port
1104 		 * database entry at loop_id 0x7fe which we use to
1105 		 * acquire the Ports WWPN.
1106 		 */
1107 		} else if ((mb[1] != 0x7fe) &&
1108 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1109 		    (CFG_IST(ha, CFG_CTRL_24258081) &&
1110 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1111 			EL(ha, "%xh Port Database Update, Login/Logout "
1112 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1113 			    mb[0], mb[1], mb[2], mb[3]);
1114 		} else {
1115 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1116 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1117 			    mb[3]);
1118 			*set_flags |= LOOP_RESYNC_NEEDED;
1119 			*set_flags &= ~LOOP_DOWN;
1120 			*reset_flags |= LOOP_DOWN;
1121 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1122 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1123 			TASK_DAEMON_LOCK(ha);
1124 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1125 			vha->task_daemon_flags &= ~LOOP_DOWN;
1126 			TASK_DAEMON_UNLOCK(ha);
1127 			ADAPTER_STATE_LOCK(ha);
1128 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1129 			ADAPTER_STATE_UNLOCK(ha);
1130 		}
1131 
1132 		/* Update AEN queue. */
1133 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1134 			ql_enqueue_aen(ha, mb[0], NULL);
1135 		}
1136 		break;
1137 
1138 	case MBA_RSCN_UPDATE:
1139 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1140 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1141 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1142 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1143 
1144 		/* Locate port state structure. */
1145 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1146 			if (vha->vp_index == LSB(mb[3])) {
1147 				break;
1148 			}
1149 		}
1150 
1151 		if (vha == NULL) {
1152 			break;
1153 		}
1154 
1155 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1156 		    MSB(mb[2]) == vha->d_id.b.area &&
1157 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1158 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1159 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1160 		} else {
1161 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1162 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1163 			if (FC_PORT_STATE_MASK(vha->state) !=
1164 			    FC_STATE_OFFLINE) {
1165 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1166 				TASK_DAEMON_LOCK(ha);
1167 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1168 				TASK_DAEMON_UNLOCK(ha);
1169 				*set_flags |= RSCN_UPDATE_NEEDED;
1170 			}
1171 		}
1172 
1173 		/* Update AEN queue. */
1174 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1175 			ql_enqueue_aen(ha, mb[0], NULL);
1176 		}
1177 		break;
1178 
1179 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1180 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1181 		    RD16_IO_REG(ha, mailbox_out[1]));
1182 		break;
1183 
1184 	case MBA_IP_RECEIVE:
1185 	case MBA_IP_BROADCAST:
1186 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1187 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1188 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1189 
1190 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1191 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1192 
1193 		/* Locate device queue. */
1194 		s_id.b.al_pa = LSB(mb[2]);
1195 		s_id.b.area = MSB(mb[2]);
1196 		s_id.b.domain = LSB(mb[1]);
1197 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1198 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1199 			break;
1200 		}
1201 
1202 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1203 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1204 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1205 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1206 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1207 
1208 		tq->ub_sequence_length = mb[3];
1209 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1210 		if (mb[3] % cnt) {
1211 			tq->ub_total_seg_cnt++;
1212 		}
1213 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1214 
1215 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1216 		    index++) {
1217 			mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1218 		}
1219 
1220 		tq->ub_seq_id = ++ha->ub_seq_id;
1221 		tq->ub_seq_cnt = 0;
1222 		tq->ub_frame_ro = 0;
1223 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1224 		    (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
1225 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1226 		ha->rcv_dev_q = tq;
1227 
1228 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1229 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1230 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1231 			    QL_SUCCESS) {
1232 				EL(ha, "ql_ub_frame_hdr failed, "
1233 				    "isp_abort_needed\n");
1234 				*set_flags |= ISP_ABORT_NEEDED;
1235 				break;
1236 			}
1237 		}
1238 		break;
1239 
1240 	case MBA_IP_LOW_WATER_MARK:
1241 	case MBA_IP_RCV_BUFFER_EMPTY:
1242 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1243 		    mb[0]);
1244 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1245 		break;
1246 
1247 	case MBA_IP_HDR_DATA_SPLIT:
1248 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1249 		break;
1250 
1251 	case MBA_ERROR_LOGGING_DISABLED:
1252 		EL(ha, "%xh error logging disabled received, "
1253 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1254 		break;
1255 
1256 	case MBA_POINT_TO_POINT:
1257 	/* case MBA_DCBX_COMPLETED: */
1258 		if (CFG_IST(ha, CFG_CTRL_8081)) {
1259 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1260 			ha->async_event_wait |= BIT_1;
1261 		} else {
1262 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1263 		}
1264 		ADAPTER_STATE_LOCK(ha);
1265 		ha->flags |= POINT_TO_POINT;
1266 		ADAPTER_STATE_UNLOCK(ha);
1267 		break;
1268 
1269 	case MBA_FCF_CONFIG_ERROR:
1270 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1271 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1272 		break;
1273 
1274 	case MBA_DCBX_PARAM_CHANGED:
1275 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1276 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1277 		break;
1278 
1279 	case MBA_CHG_IN_CONNECTION:
1280 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1281 		if (mb[1] == 2) {
1282 			EL(ha, "%xh Change In Connection received, "
1283 			    "mbx1=%xh\n",  mb[0], mb[1]);
1284 			ADAPTER_STATE_LOCK(ha);
1285 			ha->flags &= ~POINT_TO_POINT;
1286 			ADAPTER_STATE_UNLOCK(ha);
1287 			if (ha->topology & QL_N_PORT) {
1288 				ha->topology = (uint8_t)(ha->topology &
1289 				    ~QL_N_PORT);
1290 				ha->topology = (uint8_t)(ha->topology |
1291 				    QL_NL_PORT);
1292 			}
1293 		} else {
1294 			EL(ha, "%xh Change In Connection received, "
1295 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1296 			*set_flags |= ISP_ABORT_NEEDED;
1297 		}
1298 		break;
1299 
1300 	case MBA_ZIO_UPDATE:
1301 		EL(ha, "%xh ZIO response received\n", mb[0]);
1302 
1303 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1304 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1305 		intr = B_FALSE;
1306 		break;
1307 
1308 	case MBA_PORT_BYPASS_CHANGED:
1309 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1310 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1311 		/*
1312 		 * Event generated when there is a transition on
1313 		 * port bypass of crystal+.
1314 		 * Mailbox 1:	Bit 0 - External.
1315 		 *		Bit 2 - Internal.
1316 		 * When the bit is 0, the port is bypassed.
1317 		 *
1318 		 * For now we will generate a LIP for all cases.
1319 		 */
1320 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1321 		break;
1322 
1323 	case MBA_RECEIVE_ERROR:
1324 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1325 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1326 		    RD16_IO_REG(ha, mailbox_out[2]));
1327 		break;
1328 
1329 	case MBA_LS_RJT_SENT:
1330 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1331 		    RD16_IO_REG(ha, mailbox_out[1]));
1332 		break;
1333 
1334 	case MBA_FW_RESTART_COMP:
1335 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1336 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1337 		break;
1338 
1339 	case MBA_IDC_COMPLETE:
1340 		EL(ha, "%xh Inter-driver communication complete received, "
1341 		    " mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh,"
1342 		    " mbx6=%xh, mbx7=%xh\n", mb[0],
1343 		    RD16_IO_REG(ha, mailbox_out[1]),
1344 		    RD16_IO_REG(ha, mailbox_out[2]),
1345 		    RD16_IO_REG(ha, mailbox_out[3]),
1346 		    RD16_IO_REG(ha, mailbox_out[4]),
1347 		    RD16_IO_REG(ha, mailbox_out[5]),
1348 		    RD16_IO_REG(ha, mailbox_out[6]),
1349 		    RD16_IO_REG(ha, mailbox_out[7]));
1350 		ha->async_event_wait |= BIT_0;
1351 		break;
1352 
1353 	case MBA_IDC_NOTIFICATION:
1354 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1355 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1356 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1357 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1358 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1359 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1360 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1361 		EL(ha, "%xh Inter-driver communication request notification "
1362 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1363 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1364 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1365 		    ha->idc_mb[6], ha->idc_mb[7]);
1366 		*set_flags |= IDC_ACK_NEEDED;
1367 		break;
1368 
1369 	case MBA_IDC_TIME_EXTENDED:
1370 		EL(ha, "%xh Inter-driver communication time extended received,"
1371 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1372 		    RD16_IO_REG(ha, mailbox_out[1]),
1373 		    RD16_IO_REG(ha, mailbox_out[2]));
1374 		break;
1375 
1376 	default:
1377 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1378 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1379 		    RD16_IO_REG(ha, mailbox_out[2]),
1380 		    RD16_IO_REG(ha, mailbox_out[3]));
1381 		break;
1382 	}
1383 
1384 	/* Clear RISC interrupt */
1385 	if (intr && intr_clr) {
1386 		if (CFG_IST(ha, CFG_CTRL_8021)) {
1387 			ql_8021_clr_fw_intr(ha);
1388 		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
1389 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1390 		} else {
1391 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1392 		}
1393 	}
1394 
1395 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1396 }
1397 
1398 /*
1399  * ql_fast_fcp_post
1400  *	Fast path for good SCSI I/O completion.
1401  *
1402  * Input:
1403  *	sp:	SRB pointer.
1404  *
1405  * Context:
1406  *	Interrupt or Kernel context, no mailbox commands allowed.
1407  */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		/* Decrement per-LUN outstanding command count. */
		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	/*
	 * NOTE(review): in the taken branch ql_next() appears to assume
	 * ownership of the device queue lock (the else branch releases it
	 * explicitly here) — confirm against ql_next()'s definition.
	 */
	if (lq->cmd.first != NULL) {
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required.  */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	/*
	 * Now call the pkt completion callback.  The interrupt lock is
	 * dropped across the upcall so the ULP's completion handler does
	 * not run with this adapter's interrupt lock held.
	 */
	if (sp->flags & SRB_POLL) {
		/* Polled command: caller reaps it; just clear the flag. */
		sp->flags &= ~SRB_POLL;
	} else if (sp->pkt->pkt_comp) {
		INTR_UNLOCK(ha);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INTR_LOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1482 
1483 /*
1484  * ql_response_pkt
1485  *	Processes response entry.
1486  *
1487  * Input:
1488  *	ha:		adapter state pointer.
1489  *	done_q:		head pointer to done queue.
1490  *	set_flags:	task daemon flags to set.
1491  *	reset_flags:	task daemon flags to reset.
1492  *	intr_clr:	early interrupt clear
1493  *
1494  * Context:
1495  *	Interrupt or Kernel context, no mailbox commands allowed.
1496  */
1497 static void
1498 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1499     uint32_t *reset_flags, int intr_clr)
1500 {
1501 	response_t	*pkt;
1502 	uint32_t	dma_sync_size_1 = 0;
1503 	uint32_t	dma_sync_size_2 = 0;
1504 	int		status = 0;
1505 
1506 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1507 
1508 	/* Clear RISC interrupt */
1509 	if (intr_clr) {
1510 		if (CFG_IST(ha, CFG_CTRL_8021)) {
1511 			ql_8021_clr_fw_intr(ha);
1512 		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
1513 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1514 		} else {
1515 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1516 		}
1517 	}
1518 
1519 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1520 		EL(ha, "index error = %xh, isp_abort_needed",
1521 		    ha->isp_rsp_index);
1522 		*set_flags |= ISP_ABORT_NEEDED;
1523 		return;
1524 	}
1525 
1526 	if ((ha->flags & ONLINE) == 0) {
1527 		QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1528 		return;
1529 	}
1530 
1531 	/* Calculate size of response queue entries to sync. */
1532 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1533 		dma_sync_size_1 = (uint32_t)
1534 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1535 		    RESPONSE_ENTRY_SIZE);
1536 	} else if (ha->isp_rsp_index == 0) {
1537 		dma_sync_size_1 = (uint32_t)
1538 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1539 		    RESPONSE_ENTRY_SIZE);
1540 	} else {
1541 		/* Responses wrap around the Q */
1542 		dma_sync_size_1 = (uint32_t)
1543 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1544 		    RESPONSE_ENTRY_SIZE);
1545 		dma_sync_size_2 = (uint32_t)
1546 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1547 	}
1548 
1549 	/* Sync DMA buffer. */
1550 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1551 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1552 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1553 	    DDI_DMA_SYNC_FORKERNEL);
1554 	if (dma_sync_size_2) {
1555 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1556 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1557 		    DDI_DMA_SYNC_FORKERNEL);
1558 	}
1559 
1560 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1561 		pkt = ha->response_ring_ptr;
1562 
1563 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1564 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1565 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1566 		    RESPONSE_ENTRY_SIZE);
1567 
1568 		/* Adjust ring index. */
1569 		ha->rsp_ring_index++;
1570 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1571 			ha->rsp_ring_index = 0;
1572 			ha->response_ring_ptr = ha->response_ring_bp;
1573 		} else {
1574 			ha->response_ring_ptr++;
1575 		}
1576 
1577 		/* Process packet. */
1578 		if (ha->status_srb != NULL && pkt->entry_type !=
1579 		    STATUS_CONT_TYPE) {
1580 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1581 			ha->status_srb = NULL;
1582 		}
1583 
1584 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1585 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1586 
1587 		if (pkt->entry_status != 0) {
1588 			ql_error_entry(ha, pkt, done_q, set_flags,
1589 			    reset_flags);
1590 		} else {
1591 			switch (pkt->entry_type) {
1592 			case STATUS_TYPE:
1593 				status |= CFG_IST(ha, CFG_CTRL_24258081) ?
1594 				    ql_24xx_status_entry(ha,
1595 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1596 				    reset_flags) :
1597 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1598 				    done_q, set_flags, reset_flags);
1599 				break;
1600 			case STATUS_CONT_TYPE:
1601 				ql_status_cont_entry(ha,
1602 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1603 				    reset_flags);
1604 				break;
1605 			case IP_TYPE:
1606 			case IP_A64_TYPE:
1607 			case IP_CMD_TYPE:
1608 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1609 				    set_flags, reset_flags);
1610 				break;
1611 			case IP_RECEIVE_TYPE:
1612 				ql_ip_rcv_entry(ha,
1613 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1614 				    reset_flags);
1615 				break;
1616 			case IP_RECEIVE_CONT_TYPE:
1617 				ql_ip_rcv_cont_entry(ha,
1618 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1619 				    set_flags, reset_flags);
1620 				break;
1621 			case IP_24XX_RECEIVE_TYPE:
1622 				ql_ip_24xx_rcv_entry(ha,
1623 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1624 				    set_flags, reset_flags);
1625 				break;
1626 			case MS_TYPE:
1627 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1628 				    set_flags, reset_flags);
1629 				break;
1630 			case REPORT_ID_TYPE:
1631 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1632 				    done_q, set_flags, reset_flags);
1633 				break;
1634 			case ELS_PASSTHRU_TYPE:
1635 				ql_els_passthru_entry(ha,
1636 				    (els_passthru_entry_rsp_t *)pkt,
1637 				    done_q, set_flags, reset_flags);
1638 				break;
1639 			case IP_BUF_POOL_TYPE:
1640 			case MARKER_TYPE:
1641 			case VP_MODIFY_TYPE:
1642 			case VP_CONTROL_TYPE:
1643 				break;
1644 			default:
1645 				EL(ha, "Unknown IOCB entry type=%xh\n",
1646 				    pkt->entry_type);
1647 				break;
1648 			}
1649 		}
1650 	}
1651 
1652 	/* Inform RISC of processed responses. */
1653 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1654 
1655 	/* RESET packet received delay for possible async event. */
1656 	if (status & BIT_0) {
1657 		drv_usecwait(500000);
1658 	}
1659 
1660 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1661 }
1662 
1663 /*
1664  * ql_error_entry
1665  *	Processes error entry.
1666  *
1667  * Input:
1668  *	ha = adapter state pointer.
1669  *	pkt = entry pointer.
1670  *	done_q = head pointer to done queue.
1671  *	set_flags = task daemon flags to set.
1672  *	reset_flags = task daemon flags to reset.
1673  *
1674  * Context:
1675  *	Interrupt or Kernel context, no mailbox commands allowed.
1676  */
1677 /* ARGSUSED */
1678 static void
1679 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1680     uint32_t *set_flags, uint32_t *reset_flags)
1681 {
1682 	ql_srb_t	*sp;
1683 	uint32_t	index, resp_identifier;
1684 
1685 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1686 		EL(ha, "Aborted command\n");
1687 		return;
1688 	}
1689 
1690 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1691 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1692 
1693 	if (pkt->entry_status & BIT_6) {
1694 		EL(ha, "Request Queue DMA error\n");
1695 	} else if (pkt->entry_status & BIT_5) {
1696 		EL(ha, "Invalid Entry Order\n");
1697 	} else if (pkt->entry_status & BIT_4) {
1698 		EL(ha, "Invalid Entry Count\n");
1699 	} else if (pkt->entry_status & BIT_3) {
1700 		EL(ha, "Invalid Entry Parameter\n");
1701 	} else if (pkt->entry_status & BIT_2) {
1702 		EL(ha, "Invalid Entry Type\n");
1703 	} else if (pkt->entry_status & BIT_1) {
1704 		EL(ha, "Busy\n");
1705 	} else {
1706 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1707 	}
1708 
1709 	/* Validate the response entry handle. */
1710 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1711 	index = resp_identifier & OSC_INDEX_MASK;
1712 	if (index < MAX_OUTSTANDING_COMMANDS) {
1713 		/* the index seems reasonable */
1714 		sp = ha->outstanding_cmds[index];
1715 		if (sp != NULL) {
1716 			if (sp->handle == resp_identifier) {
1717 				/* Neo, you're the one... */
1718 				ha->outstanding_cmds[index] = NULL;
1719 				sp->handle = 0;
1720 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1721 			} else {
1722 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1723 				    resp_identifier, sp->handle);
1724 				sp = NULL;
1725 				ql_signal_abort(ha, set_flags);
1726 			}
1727 		} else {
1728 			sp = ql_verify_preprocessed_cmd(ha,
1729 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1730 		}
1731 	} else {
1732 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1733 		    index, resp_identifier);
1734 		ql_signal_abort(ha, set_flags);
1735 	}
1736 
1737 	if (sp != NULL) {
1738 		/* Bad payload or header */
1739 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1740 			/* Bad payload or header, set error status. */
1741 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1742 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1743 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1744 		} else {
1745 			/* Set error status. */
1746 			sp->pkt->pkt_reason = CS_UNKNOWN;
1747 		}
1748 
1749 		/* Set completed status. */
1750 		sp->flags |= SRB_ISP_COMPLETED;
1751 
1752 		/* Place command on done queue. */
1753 		ql_add_link_b(done_q, &sp->cmd);
1754 
1755 	}
1756 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1757 }
1758 
1759 /*
1760  * ql_status_entry
1761  *	Processes received ISP2200-2300 status entry.
1762  *
1763  * Input:
1764  *	ha:		adapter state pointer.
1765  *	pkt:		entry pointer.
1766  *	done_q:		done queue pointer.
1767  *	set_flags:	task daemon flags to set.
1768  *	reset_flags:	task daemon flags to reset.
1769  *
1770  * Returns:
1771  *	BIT_0 = CS_RESET status received.
1772  *
1773  * Context:
1774  *	Interrupt or Kernel context, no mailbox commands allowed.
1775  */
1776 /* ARGSUSED */
1777 static int
1778 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1779     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1780 {
1781 	ql_srb_t		*sp;
1782 	uint32_t		index, resp_identifier;
1783 	uint16_t		comp_status;
1784 	int			rval = 0;
1785 
1786 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1787 
1788 	/* Validate the response entry handle. */
1789 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1790 	index = resp_identifier & OSC_INDEX_MASK;
1791 	if (index < MAX_OUTSTANDING_COMMANDS) {
1792 		/* the index seems reasonable */
1793 		sp = ha->outstanding_cmds[index];
1794 		if (sp != NULL) {
1795 			if (sp->handle == resp_identifier) {
1796 				/* Neo, you're the one... */
1797 				ha->outstanding_cmds[index] = NULL;
1798 				sp->handle = 0;
1799 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1800 			} else {
1801 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1802 				    resp_identifier, sp->handle);
1803 				sp = NULL;
1804 				ql_signal_abort(ha, set_flags);
1805 			}
1806 		} else {
1807 			sp = ql_verify_preprocessed_cmd(ha,
1808 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1809 		}
1810 	} else {
1811 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1812 		    index, resp_identifier);
1813 		ql_signal_abort(ha, set_flags);
1814 	}
1815 
1816 	if (sp != NULL) {
1817 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1818 		    &pkt->comp_status);
1819 
1820 		/*
1821 		 * We dont care about SCSI QFULLs.
1822 		 */
1823 		if (comp_status == CS_QUEUE_FULL) {
1824 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1825 			    sp->lun_queue->target_queue->d_id.b24,
1826 			    sp->lun_queue->lun_no);
1827 			comp_status = CS_COMPLETE;
1828 		}
1829 
1830 		/*
1831 		 * 2300 firmware marks completion status as data underrun
1832 		 * for scsi qfulls. Make it transport complete.
1833 		 */
1834 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1835 		    (comp_status == CS_DATA_UNDERRUN) &&
1836 		    (pkt->scsi_status_l != 0)) {
1837 			comp_status = CS_COMPLETE;
1838 		}
1839 
1840 		/*
1841 		 * Workaround T3 issue where we do not get any data xferred
1842 		 * but get back a good status.
1843 		 */
1844 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1845 		    comp_status == CS_COMPLETE &&
1846 		    pkt->scsi_status_l == 0 &&
1847 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1848 		    pkt->residual_length == 0 &&
1849 		    sp->fcp &&
1850 		    sp->fcp->fcp_data_len != 0 &&
1851 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1852 		    SF_DATA_OUT) {
1853 			comp_status = CS_ABORTED;
1854 		}
1855 
1856 		if (sp->flags & SRB_MS_PKT) {
1857 			/*
1858 			 * Ideally it should never be true. But there
1859 			 * is a bug in FW which upon receiving invalid
1860 			 * parameters in MS IOCB returns it as
1861 			 * status entry and not as ms entry type.
1862 			 */
1863 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1864 			    set_flags, reset_flags);
1865 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1866 			    ha->instance);
1867 			return (0);
1868 		}
1869 
1870 		/*
1871 		 * Fast path to good SCSI I/O completion
1872 		 */
1873 		if ((comp_status == CS_COMPLETE) &
1874 		    (!pkt->scsi_status_l) &
1875 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1876 			/* Set completed status. */
1877 			sp->flags |= SRB_ISP_COMPLETED;
1878 			sp->pkt->pkt_reason = comp_status;
1879 			ql_fast_fcp_post(sp);
1880 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1881 			    ha->instance);
1882 			return (0);
1883 		}
1884 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1885 		    reset_flags);
1886 	}
1887 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1888 
1889 	return (rval);
1890 }
1891 
1892 /*
1893  * ql_24xx_status_entry
1894  *	Processes received ISP24xx status entry.
1895  *
1896  * Input:
1897  *	ha:		adapter state pointer.
1898  *	pkt:		entry pointer.
1899  *	done_q:		done queue pointer.
1900  *	set_flags:	task daemon flags to set.
1901  *	reset_flags:	task daemon flags to reset.
1902  *
1903  * Returns:
1904  *	BIT_0 = CS_RESET status received.
1905  *
1906  * Context:
1907  *	Interrupt or Kernel context, no mailbox commands allowed.
1908  */
1909 /* ARGSUSED */
1910 static int
1911 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1912     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1913 {
1914 	ql_srb_t		*sp = NULL;
1915 	uint16_t		comp_status;
1916 	uint32_t		index, resp_identifier;
1917 	int			rval = 0;
1918 
1919 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1920 
1921 	/* Validate the response entry handle. */
1922 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1923 	index = resp_identifier & OSC_INDEX_MASK;
1924 	if (index < MAX_OUTSTANDING_COMMANDS) {
1925 		/* the index seems reasonable */
1926 		sp = ha->outstanding_cmds[index];
1927 		if (sp != NULL) {
1928 			if (sp->handle == resp_identifier) {
1929 				/* Neo, you're the one... */
1930 				ha->outstanding_cmds[index] = NULL;
1931 				sp->handle = 0;
1932 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1933 			} else {
1934 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1935 				    resp_identifier, sp->handle);
1936 				sp = NULL;
1937 				ql_signal_abort(ha, set_flags);
1938 			}
1939 		} else {
1940 			sp = ql_verify_preprocessed_cmd(ha,
1941 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1942 		}
1943 	} else {
1944 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1945 		    index, resp_identifier);
1946 		ql_signal_abort(ha, set_flags);
1947 	}
1948 
1949 	if (sp != NULL) {
1950 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1951 		    &pkt->comp_status);
1952 
1953 		/* We dont care about SCSI QFULLs. */
1954 		if (comp_status == CS_QUEUE_FULL) {
1955 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1956 			    sp->lun_queue->target_queue->d_id.b24,
1957 			    sp->lun_queue->lun_no);
1958 			comp_status = CS_COMPLETE;
1959 		}
1960 
1961 		/*
1962 		 * 2300 firmware marks completion status as data underrun
1963 		 * for scsi qfulls. Make it transport complete.
1964 		 */
1965 		if ((comp_status == CS_DATA_UNDERRUN) &&
1966 		    (pkt->scsi_status_l != 0)) {
1967 			comp_status = CS_COMPLETE;
1968 		}
1969 
1970 		/*
1971 		 * Workaround T3 issue where we do not get any data xferred
1972 		 * but get back a good status.
1973 		 */
1974 		if (comp_status == CS_COMPLETE &&
1975 		    pkt->scsi_status_l == 0 &&
1976 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1977 		    pkt->residual_length != 0 &&
1978 		    sp->fcp &&
1979 		    sp->fcp->fcp_data_len != 0 &&
1980 		    sp->fcp->fcp_cntl.cntl_write_data) {
1981 			comp_status = CS_ABORTED;
1982 		}
1983 
1984 		/*
1985 		 * Fast path to good SCSI I/O completion
1986 		 */
1987 		if ((comp_status == CS_COMPLETE) &
1988 		    (!pkt->scsi_status_l) &
1989 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1990 			/* Set completed status. */
1991 			sp->flags |= SRB_ISP_COMPLETED;
1992 			sp->pkt->pkt_reason = comp_status;
1993 			ql_fast_fcp_post(sp);
1994 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1995 			    ha->instance);
1996 			return (0);
1997 		}
1998 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1999 		    set_flags, reset_flags);
2000 	}
2001 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2002 
2003 	return (rval);
2004 }
2005 
2006 /*
2007  * ql_verify_preprocessed_cmd
 *	Handles preprocessed commands.
2009  *
2010  * Input:
2011  *	ha:		adapter state pointer.
2012  *	pkt_handle:	handle pointer.
2013  *	set_flags:	task daemon flags to set.
2014  *	reset_flags:	task daemon flags to reset.
2015  *
2016  * Returns:
2017  *	srb pointer or NULL
2018  *
2019  * Context:
2020  *	Interrupt or Kernel context, no mailbox commands allowed.
2021  */
2022 /* ARGSUSED */
2023 ql_srb_t *
2024 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
2025     uint32_t *set_flags, uint32_t *reset_flags)
2026 {
2027 	ql_srb_t		*sp = NULL;
2028 	uint32_t		index, resp_identifier;
2029 	uint32_t		get_handle = 10;
2030 
2031 	while (get_handle) {
2032 		/* Get handle. */
2033 		resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
2034 		index = resp_identifier & OSC_INDEX_MASK;
2035 		/* Validate handle. */
2036 		if (index < MAX_OUTSTANDING_COMMANDS) {
2037 			sp = ha->outstanding_cmds[index];
2038 		}
2039 
2040 		if (sp != NULL) {
2041 			EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2042 			    resp_identifier, get_handle, index);
2043 			break;
2044 		} else {
2045 			get_handle -= 1;
2046 			drv_usecwait(10000);
2047 			if (get_handle == 1) {
2048 				/* Last chance, Sync whole DMA buffer. */
2049 				(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2050 				    RESPONSE_Q_BUFFER_OFFSET,
2051 				    RESPONSE_QUEUE_SIZE,
2052 				    DDI_DMA_SYNC_FORKERNEL);
2053 				EL(ha, "last chance DMA sync, index=%xh\n",
2054 				    index);
2055 			}
2056 		}
2057 	}
2058 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2059 
2060 	return (sp);
2061 }
2062 
2063 
2064 /*
2065  * ql_status_error
2066  *	Processes received ISP status entry error.
2067  *
2068  * Input:
2069  *	ha:		adapter state pointer.
2070  *	sp:		SRB pointer.
2071  *	pkt:		entry pointer.
2072  *	done_q:		done queue pointer.
2073  *	set_flags:	task daemon flags to set.
2074  *	reset_flags:	task daemon flags to reset.
2075  *
2076  * Returns:
2077  *	BIT_0 = CS_RESET status received.
2078  *
2079  * Context:
2080  *	Interrupt or Kernel context, no mailbox commands allowed.
2081  */
2082 /* ARGSUSED */
static int
ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	uint32_t		sense_sz = 0;
	uint32_t		cnt;
	ql_tgt_t		*tq;
	fcp_rsp_t		*fcpr;
	struct fcp_rsp_info	*rsp;
	int			rval = 0;

	/*
	 * Chip-neutral snapshot of the status IOCB.  The 2xxx and
	 * 24xx-family entry layouts differ, so the fields needed below
	 * are normalized into this struct first; all later logic reads
	 * only 'sts'.
	 */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	fcp_residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* 24xx-family controllers use the sts_24xx layout. */
		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt24->comp_status);
		sts.scsi_status_l = pkt24->scsi_status_l;
		sts.scsi_status_h = pkt24->scsi_status_h;

		/* Setup firmware residuals (only meaningful on underrun). */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;

		/*
		 * Firmware and FCP response disagree on the underrun
		 * residual; distrust the FCP underrun indication.
		 */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {

			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
			    sts.residual_length,
			    pkt24->fcp_rsp_residual_count);
			sts.scsi_status_h = (uint8_t)
			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
		}

		/* Setup state flags. */
		sts.state_flags_l = pkt24->state_flags_l;
		sts.state_flags_h = pkt24->state_flags_h;

		/*
		 * Augment the entry's state flags from how the command
		 * was built; only claim data transfer when a transfer
		 * was expected and not entirely unperformed.
		 */
		if (sp->fcp->fcp_data_len &&
		    (sts.comp_status != CS_DATA_UNDERRUN ||
		    sts.residual_length != sp->fcp->fcp_data_len)) {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (sp->fcp->fcp_cntl.cntl_write_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_OUT);
		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_IN);
		}
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_HEAD_OF_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_ORDERED_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_SIMPLE_Q);
		}

		/* Setup FCP response info. */
		sts.rsp_info = &pkt24->rsp_sense_data[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_rsp_data_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
			/*
			 * 24xx returns these bytes word-swapped; undo
			 * that in place, one 4-byte word at a time.
			 */
			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
				ql_chg_endian(sts.rsp_info + cnt, 4);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data (follows the response info bytes). */
		sts.req_sense_data =
		    &pkt24->rsp_sense_data[sts.rsp_info_length];
		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
			sts.req_sense_length =
			    ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_sense_length);
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_ARQ_DONE);
			/* Only the bytes that fit in this entry. */
			sense_sz = (uint32_t)
			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
			    (uintptr_t)sts.req_sense_data);
			for (cnt = 0; cnt < sense_sz; cnt += 4) {
				ql_chg_endian(sts.req_sense_data + cnt, 4);
			}
		} else {
			sts.req_sense_length = 0;
		}
	} else {
		/* 2xxx layout. Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt23->comp_status);
		sts.scsi_status_l = pkt23->scsi_status_l;
		sts.scsi_status_h = pkt23->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt23->residual_length) : 0;

		/* Setup FCP residuals (2xxx has no separate FCP count). */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    sts.residual_length : 0;

		/* Setup state flags. */
		sts.state_flags_l = pkt23->state_flags_l;
		sts.state_flags_h = pkt23->state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info = &pkt23->rsp_info[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get16(
			    ha->hba_buf.acc_handle,
			    (uint16_t *)&pkt23->rsp_info_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data = &pkt23->req_sense_data[0];
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    ddi_get16(ha->hba_buf.acc_handle,
		    (uint16_t *)&pkt23->req_sense_length) : 0;
	}

	/* Build the FCP response in the caller's response buffer. */
	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);

	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
	    sizeof (fcp_rsp_t));

	tq = sp->lun_queue->target_queue;

	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
		fcpr->fcp_u.fcp_status.sense_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_OVER) {
		fcpr->fcp_u.fcp_status.resid_over = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_UNDER) {
		fcpr->fcp_u.fcp_status.resid_under = 1;
	}
	fcpr->fcp_u.fcp_status.reserved_1 = 0;

	/* Set ISP completion status */
	sp->pkt->pkt_reason = sts.comp_status;

	/* Update statistics. */
	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {

		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
		if (sense_sz > sts.rsp_info_length) {
			sense_sz = sts.rsp_info_length;
		}

		/* copy response information data. */
		if (sense_sz) {
			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
		}
		fcpr->fcp_response_len = sense_sz;

		/* Sense data, if any, starts after the response info. */
		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
		    fcpr->fcp_response_len);

		/* rsp_info byte 3 is the FCP response code. */
		switch (*(sts.rsp_info + 3)) {
		case FCP_NO_FAILURE:
			break;
		case FCP_DL_LEN_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].dl_len_mismatches++;
			break;
		case FCP_CMND_INVALID:
			break;
		case FCP_DATA_RO_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].data_ro_mismatches++;
			break;
		case FCP_TASK_MGMT_NOT_SUPPTD:
			break;
		case FCP_TASK_MGMT_FAILED:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].task_mgmt_failures++;
			break;
		default:
			break;
		}
	} else {
		/*
		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
		 */
		fcpr->fcp_response_len = 0;
	}

	/* Set reset status received. */
	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
		rval |= BIT_0;
	}

	/*
	 * Retry decision cascade.  First: loop is down (and not held
	 * by link-down reporting) -- retry any non-tape command.
	 */
	if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
	    ha->task_daemon_flags & LOOP_DOWN) {
		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
		    tq->d_id.b24, sp->lun_queue->lun_no);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    tq->port_down_retry_count != 0 &&
	    (sts.comp_status == CS_INCOMPLETE ||
	    sts.comp_status == CS_PORT_UNAVAILABLE ||
	    sts.comp_status == CS_PORT_LOGGED_OUT ||
	    sts.comp_status == CS_PORT_CONFIG_CHG ||
	    sts.comp_status == CS_PORT_BUSY)) {
		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
		    tq->port_down_retry_count);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;

		/* Suspend the queue once per port-down episode. */
		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags |= TQF_QUEUE_SUSPENDED;

			/* Decrement port down count. */
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				tq->port_down_retry_count--;
			}

			DEVICE_QUEUE_UNLOCK(tq);

			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
			    == 0 &&
			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
				sp->ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(sp->ha, tq, done_q);
			}

			/* Arm the port retry timer if not already armed. */
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (sts.comp_status == CS_RESET ||
	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
		if (sts.comp_status == CS_RESET) {
			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		} else if (sts.comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
			    tq->qfull_retry_count);
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->qfull_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->qfull_retry_delay) ==
					    0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
		} else {
			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		}

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else {
		/* No retry: finish the FCP response for the caller. */
		fcpr->fcp_resid =
		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
		    sp->fcp->fcp_data_len : sts.fcp_residual_length;

		/* Underrun reported by firmware but not by the target. */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {

			if (sts.scsi_status_l == STATUS_CHECK) {
				sp->pkt->pkt_reason = CS_COMPLETE;
			} else {
				EL(ha, "transport error - "
				    "underrun & invalid resid\n");
				EL(ha, "ssh=%xh, ssl=%xh\n",
				    sts.scsi_status_h, sts.scsi_status_l);
				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
			}
		}

		/* Ignore firmware underrun error. */
		if (sts.comp_status == CS_DATA_UNDERRUN &&
		    (sts.scsi_status_h & FCP_RESID_UNDER ||
		    (sts.scsi_status_l != STATUS_CHECK &&
		    sts.scsi_status_l != STATUS_GOOD))) {
			sp->pkt->pkt_reason = CS_COMPLETE;
		}

		if (sp->pkt->pkt_reason != CS_COMPLETE) {
			ha->xioctl->DeviceErrorCount++;
			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
			    "\n", sts.comp_status, tq->d_id.b24,
			    sp->lun_queue->lun_no);
		}

		/* Set target request sense data. */
		if (sts.scsi_status_l == STATUS_CHECK) {
			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {

				/*
				 * NO SENSE / UNIT ATTENTION checks are
				 * not counted as device errors.
				 */
				if (sp->pkt->pkt_reason == CS_COMPLETE &&
				    sts.req_sense_data[2] != KEY_NO_SENSE &&
				    sts.req_sense_data[2] !=
				    KEY_UNIT_ATTENTION) {
					ha->xioctl->DeviceErrorCount++;
				}

				sense_sz = sts.req_sense_length;

				/* Insure data does not exceed buf. */
				if (sp->pkt->pkt_rsplen <=
				    (uint32_t)sizeof (fcp_rsp_t) +
				    fcpr->fcp_response_len) {
					sp->request_sense_length = 0;
				} else {
					sp->request_sense_length = (uint32_t)
					    (sp->pkt->pkt_rsplen -
					    sizeof (fcp_rsp_t) -
					    fcpr->fcp_response_len);
				}

				if (sense_sz <
				    sp->request_sense_length) {
					sp->request_sense_length =
					    sense_sz;
				}

				sp->request_sense_ptr = (caddr_t)rsp;

				/* Clamp to what this entry can hold. */
				sense_sz = (uint32_t)
				    (((uintptr_t)pkt23 +
				    sizeof (sts_entry_t)) -
				    (uintptr_t)sts.req_sense_data);
				if (sp->request_sense_length <
				    sense_sz) {
					sense_sz =
					    sp->request_sense_length;
				}

				fcpr->fcp_sense_len = sense_sz;

				/* Move sense data. */
				ddi_rep_get8(ha->hba_buf.acc_handle,
				    (uint8_t *)sp->request_sense_ptr,
				    sts.req_sense_data,
				    (size_t)sense_sz,
				    DDI_DEV_AUTOINCR);

				sp->request_sense_ptr += sense_sz;
				sp->request_sense_length -= sense_sz;
				/*
				 * Remaining sense bytes arrive in status
				 * continuation entries; stash the srb so
				 * ql_status_cont_entry() can finish it.
				 */
				if (sp->request_sense_length != 0 &&
				    !(CFG_IST(ha, CFG_CTRL_8021))) {
					ha->status_srb = sp;
				}
			}

			if (sense_sz != 0) {
				EL(sp->ha, "check condition sense data, "
				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
				    sp->lun_queue->lun_no,
				    sts.req_sense_data[0],
				    sts.req_sense_data[1],
				    sts.req_sense_data[2],
				    sts.req_sense_data[3],
				    sts.req_sense_data[4],
				    sts.req_sense_data[5],
				    sts.req_sense_data[6],
				    sts.req_sense_data[7],
				    sts.req_sense_data[8],
				    sts.req_sense_data[9],
				    sts.req_sense_data[10],
				    sts.req_sense_data[11],
				    sts.req_sense_data[12],
				    sts.req_sense_data[13],
				    sts.req_sense_data[14],
				    sts.req_sense_data[15],
				    sts.req_sense_data[16],
				    sts.req_sense_data[17]);
			} else {
				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
			}
		}
	}

	/* Set completed status. */
	sp->flags |= SRB_ISP_COMPLETED;

	/* Place command on done queue unless more sense data is pending. */
	if (ha->status_srb == NULL) {
		ql_add_link_b(done_q, &sp->cmd);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2560 
2561 /*
2562  * ql_status_cont_entry
2563  *	Processes status continuation entry.
2564  *
2565  * Input:
2566  *	ha:		adapter state pointer.
2567  *	pkt:		entry pointer.
2568  *	done_q:		done queue pointer.
2569  *	set_flags:	task daemon flags to set.
2570  *	reset_flags:	task daemon flags to reset.
2571  *
2572  * Context:
2573  *	Interrupt or Kernel context, no mailbox commands allowed.
2574  */
2575 /* ARGSUSED */
2576 static void
2577 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2578     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2579 {
2580 	uint32_t	sense_sz, index;
2581 	ql_srb_t	*sp = ha->status_srb;
2582 
2583 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2584 
2585 	if (sp != NULL && sp->request_sense_length) {
2586 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2587 			sense_sz = sizeof (pkt->req_sense_data);
2588 		} else {
2589 			sense_sz = sp->request_sense_length;
2590 		}
2591 
2592 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2593 			for (index = 0; index < sense_sz; index += 4) {
2594 				ql_chg_endian((uint8_t *)
2595 				    &pkt->req_sense_data[0] + index, 4);
2596 			}
2597 		}
2598 
2599 		/* Move sense data. */
2600 		ddi_rep_get8(ha->hba_buf.acc_handle,
2601 		    (uint8_t *)sp->request_sense_ptr,
2602 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2603 		    DDI_DEV_AUTOINCR);
2604 
2605 		sp->request_sense_ptr += sense_sz;
2606 		sp->request_sense_length -= sense_sz;
2607 
2608 		/* Place command on done queue. */
2609 		if (sp->request_sense_length == 0) {
2610 			ql_add_link_b(done_q, &sp->cmd);
2611 			ha->status_srb = NULL;
2612 		}
2613 	}
2614 
2615 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2616 }
2617 
2618 /*
2619  * ql_ip_entry
2620  *	Processes received ISP IP entry.
2621  *
2622  * Input:
2623  *	ha:		adapter state pointer.
2624  *	pkt:		entry pointer.
2625  *	done_q:		done queue pointer.
2626  *	set_flags:	task daemon flags to set.
2627  *	reset_flags:	task daemon flags to reset.
2628  *
2629  * Context:
2630  *	Interrupt or Kernel context, no mailbox commands allowed.
2631  */
2632 /* ARGSUSED */
2633 static void
2634 ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
2635     uint32_t *set_flags, uint32_t *reset_flags)
2636 {
2637 	ql_srb_t	*sp;
2638 	uint32_t	index, resp_identifier;
2639 	ql_tgt_t	*tq;
2640 
2641 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2642 
2643 	/* Validate the response entry handle. */
2644 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2645 	index = resp_identifier & OSC_INDEX_MASK;
2646 	if (index < MAX_OUTSTANDING_COMMANDS) {
2647 		/* the index seems reasonable */
2648 		sp = ha->outstanding_cmds[index];
2649 		if (sp != NULL) {
2650 			if (sp->handle == resp_identifier) {
2651 				/* Neo, you're the one... */
2652 				ha->outstanding_cmds[index] = NULL;
2653 				sp->handle = 0;
2654 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2655 			} else {
2656 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2657 				    resp_identifier, sp->handle);
2658 				sp = NULL;
2659 				ql_signal_abort(ha, set_flags);
2660 			}
2661 		} else {
2662 			sp = ql_verify_preprocessed_cmd(ha,
2663 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2664 		}
2665 	} else {
2666 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2667 		    index, resp_identifier);
2668 		ql_signal_abort(ha, set_flags);
2669 	}
2670 
2671 	if (sp != NULL) {
2672 		tq = sp->lun_queue->target_queue;
2673 
2674 		/* Set ISP completion status */
2675 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2676 			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;
2677 
2678 			sp->pkt->pkt_reason = ddi_get16(
2679 			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
2680 		} else {
2681 			sp->pkt->pkt_reason = ddi_get16(
2682 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2683 		}
2684 
2685 		if (ha->task_daemon_flags & LOOP_DOWN) {
2686 			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2687 			    tq->d_id.b24);
2688 
2689 			/* Set retry status. */
2690 			sp->flags |= SRB_RETRY;
2691 
2692 		} else if (tq->port_down_retry_count &&
2693 		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2694 		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2695 		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2696 		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2697 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2698 			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2699 			    sp->pkt->pkt_reason, tq->d_id.b24,
2700 			    tq->port_down_retry_count);
2701 
2702 			/* Set retry status. */
2703 			sp->flags |= SRB_RETRY;
2704 
2705 			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2706 			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
2707 				ha->adapter_stats->d_stats[lobyte(
2708 				    tq->loop_id)].logouts_recvd++;
2709 				ql_send_logo(ha, tq, done_q);
2710 			}
2711 
2712 			/* Acquire device queue lock. */
2713 			DEVICE_QUEUE_LOCK(tq);
2714 
2715 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2716 				tq->flags |= TQF_QUEUE_SUSPENDED;
2717 
2718 				tq->port_down_retry_count--;
2719 
2720 				ADAPTER_STATE_LOCK(ha);
2721 				if (ha->port_retry_timer == 0) {
2722 					if ((ha->port_retry_timer =
2723 					    ha->port_down_retry_delay) == 0) {
2724 						*set_flags |=
2725 						    PORT_RETRY_NEEDED;
2726 					}
2727 				}
2728 				ADAPTER_STATE_UNLOCK(ha);
2729 			}
2730 
2731 			/* Release device queue specific lock. */
2732 			DEVICE_QUEUE_UNLOCK(tq);
2733 
2734 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2735 			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2736 
2737 			/* Set retry status. */
2738 			sp->flags |= SRB_RETRY;
2739 		} else {
2740 			if (sp->pkt->pkt_reason != CS_COMPLETE) {
2741 				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2742 				    sp->pkt->pkt_reason, tq->d_id.b24);
2743 			}
2744 		}
2745 
2746 		/* Set completed status. */
2747 		sp->flags |= SRB_ISP_COMPLETED;
2748 
2749 		ql_add_link_b(done_q, &sp->cmd);
2750 
2751 	}
2752 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2753 }
2754 
2755 /*
2756  * ql_ip_rcv_entry
2757  *	Processes received ISP IP buffers entry.
2758  *
2759  * Input:
2760  *	ha:		adapter state pointer.
2761  *	pkt:		entry pointer.
2762  *	done_q:		done queue pointer.
2763  *	set_flags:	task daemon flags to set.
2764  *	reset_flags:	task daemon flags to reset.
2765  *
2766  * Context:
2767  *	Interrupt or Kernel context, no mailbox commands allowed.
2768  */
2769 /* ARGSUSED */
2770 static void
2771 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2772     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2773 {
2774 	port_id_t	s_id;
2775 	uint16_t	index;
2776 	uint8_t		cnt;
2777 	ql_tgt_t	*tq;
2778 
2779 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2780 
2781 	/* Locate device queue. */
2782 	s_id.b.al_pa = pkt->s_id[0];
2783 	s_id.b.area = pkt->s_id[1];
2784 	s_id.b.domain = pkt->s_id[2];
2785 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2786 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2787 		return;
2788 	}
2789 
2790 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2791 	    &pkt->seq_length);
2792 	tq->ub_total_seg_cnt = pkt->segment_count;
2793 	tq->ub_seq_id = ++ha->ub_seq_id;
2794 	tq->ub_seq_cnt = 0;
2795 	tq->ub_frame_ro = 0;
2796 	tq->ub_loop_id = pkt->loop_id;
2797 	ha->rcv_dev_q = tq;
2798 
2799 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2800 	    tq->ub_total_seg_cnt; cnt++) {
2801 
2802 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2803 		    &pkt->buffer_handle[cnt]);
2804 
2805 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2806 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2807 			*set_flags |= ISP_ABORT_NEEDED;
2808 			break;
2809 		}
2810 	}
2811 
2812 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2813 }
2814 
2815 /*
2816  * ql_ip_rcv_cont_entry
2817  *	Processes received ISP IP buffers continuation entry.
2818  *
2819  * Input:
2820  *	ha:		adapter state pointer.
2821  *	pkt:		entry pointer.
2822  *	done_q:		done queue pointer.
2823  *	set_flags:	task daemon flags to set.
2824  *	reset_flags:	task daemon flags to reset.
2825  *
2826  * Context:
2827  *	Interrupt or Kernel context, no mailbox commands allowed.
2828  */
2829 /* ARGSUSED */
2830 static void
2831 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2832     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2833 {
2834 	uint16_t	index;
2835 	uint8_t		cnt;
2836 	ql_tgt_t	*tq;
2837 
2838 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2839 
2840 	if ((tq = ha->rcv_dev_q) == NULL) {
2841 		EL(ha, "No IP receive device\n");
2842 		return;
2843 	}
2844 
2845 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2846 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2847 
2848 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2849 		    &pkt->buffer_handle[cnt]);
2850 
2851 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2852 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2853 			*set_flags |= ISP_ABORT_NEEDED;
2854 			break;
2855 		}
2856 	}
2857 
2858 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2859 }
2860 
2861 /*
 * ql_ip_24xx_rcv_entry
2863  *	Processes received ISP24xx IP buffers entry.
2864  *
2865  * Input:
2866  *	ha:		adapter state pointer.
2867  *	pkt:		entry pointer.
2868  *	done_q:		done queue pointer.
2869  *	set_flags:	task daemon flags to set.
2870  *	reset_flags:	task daemon flags to reset.
2871  *
2872  * Context:
2873  *	Interrupt or Kernel context, no mailbox commands allowed.
2874  */
2875 /* ARGSUSED */
2876 static void
2877 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2878     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2879 {
2880 	port_id_t	s_id;
2881 	uint16_t	index;
2882 	uint8_t		cnt;
2883 	ql_tgt_t	*tq;
2884 
2885 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2886 
2887 	/* Locate device queue. */
2888 	s_id.b.al_pa = pkt->s_id[0];
2889 	s_id.b.area = pkt->s_id[1];
2890 	s_id.b.domain = pkt->s_id[2];
2891 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2892 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2893 		return;
2894 	}
2895 
2896 	if (tq->ub_total_seg_cnt == 0) {
2897 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2898 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2899 		tq->ub_total_seg_cnt = pkt->segment_count;
2900 		tq->ub_seq_id = ++ha->ub_seq_id;
2901 		tq->ub_seq_cnt = 0;
2902 		tq->ub_frame_ro = 0;
2903 		tq->ub_loop_id = (uint16_t)ddi_get16(
2904 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2905 	}
2906 
2907 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2908 	    tq->ub_total_seg_cnt; cnt++) {
2909 
2910 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2911 		    &pkt->buffer_handle[cnt]);
2912 
2913 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2914 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2915 			*set_flags |= ISP_ABORT_NEEDED;
2916 			break;
2917 		}
2918 	}
2919 
2920 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2921 }
2922 
2923 /*
2924  * ql_ms_entry
2925  *	Processes received Name/Management/CT Pass-Through entry.
2926  *
2927  * Input:
2928  *	ha:		adapter state pointer.
2929  *	pkt23:		entry pointer.
2930  *	done_q:		done queue pointer.
2931  *	set_flags:	task daemon flags to set.
2932  *	reset_flags:	task daemon flags to reset.
2933  *
2934  * Context:
2935  *	Interrupt or Kernel context, no mailbox commands allowed.
2936  */
2937 /* ARGSUSED */
2938 static void
2939 ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
2940     uint32_t *set_flags, uint32_t *reset_flags)
2941 {
2942 	ql_srb_t		*sp;
2943 	uint32_t		index, cnt, resp_identifier;
2944 	ql_tgt_t		*tq;
2945 	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;
2946 
2947 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2948 
2949 	/* Validate the response entry handle. */
2950 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2951 	index = resp_identifier & OSC_INDEX_MASK;
2952 	if (index < MAX_OUTSTANDING_COMMANDS) {
2953 		/* the index seems reasonable */
2954 		sp = ha->outstanding_cmds[index];
2955 		if (sp != NULL) {
2956 			if (sp->handle == resp_identifier) {
2957 				/* Neo, you're the one... */
2958 				ha->outstanding_cmds[index] = NULL;
2959 				sp->handle = 0;
2960 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2961 			} else {
2962 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2963 				    resp_identifier, sp->handle);
2964 				sp = NULL;
2965 				ql_signal_abort(ha, set_flags);
2966 			}
2967 		} else {
2968 			sp = ql_verify_preprocessed_cmd(ha,
2969 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2970 		}
2971 	} else {
2972 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2973 		    index, resp_identifier);
2974 		ql_signal_abort(ha, set_flags);
2975 	}
2976 
2977 	if (sp != NULL) {
2978 		if (!(sp->flags & SRB_MS_PKT)) {
2979 			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
2980 			    sp->flags);
2981 			*set_flags |= ISP_ABORT_NEEDED;
2982 			return;
2983 		}
2984 
2985 		tq = sp->lun_queue->target_queue;
2986 
2987 		/* Set ISP completion status */
2988 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2989 			sp->pkt->pkt_reason = ddi_get16(
2990 			    ha->hba_buf.acc_handle, &pkt24->status);
2991 		} else {
2992 			sp->pkt->pkt_reason = ddi_get16(
2993 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2994 		}
2995 
2996 		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
2997 		    sp->retry_count) {
2998 			EL(ha, "Resouce Unavailable Retry = %d\n",
2999 			    sp->retry_count);
3000 
3001 			/* Set retry status. */
3002 			sp->retry_count--;
3003 			sp->flags |= SRB_RETRY;
3004 
3005 			/* Acquire device queue lock. */
3006 			DEVICE_QUEUE_LOCK(tq);
3007 
3008 			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
3009 				tq->flags |= TQF_QUEUE_SUSPENDED;
3010 
3011 				ADAPTER_STATE_LOCK(ha);
3012 				if (ha->port_retry_timer == 0) {
3013 					ha->port_retry_timer = 2;
3014 				}
3015 				ADAPTER_STATE_UNLOCK(ha);
3016 			}
3017 
3018 			/* Release device queue specific lock. */
3019 			DEVICE_QUEUE_UNLOCK(tq);
3020 
3021 		} else if (tq->port_down_retry_count &&
3022 		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
3023 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
3024 			EL(ha, "Port Down Retry\n");
3025 
3026 			/* Set retry status. */
3027 			sp->flags |= SRB_RETRY;
3028 
3029 			/* Acquire device queue lock. */
3030 			DEVICE_QUEUE_LOCK(tq);
3031 
3032 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
3033 				tq->flags |= TQF_QUEUE_SUSPENDED;
3034 
3035 				tq->port_down_retry_count--;
3036 
3037 				ADAPTER_STATE_LOCK(ha);
3038 				if (ha->port_retry_timer == 0) {
3039 					if ((ha->port_retry_timer =
3040 					    ha->port_down_retry_delay) == 0) {
3041 						*set_flags |=
3042 						    PORT_RETRY_NEEDED;
3043 					}
3044 				}
3045 				ADAPTER_STATE_UNLOCK(ha);
3046 			}
3047 			/* Release device queue specific lock. */
3048 			DEVICE_QUEUE_UNLOCK(tq);
3049 
3050 		} else if (sp->pkt->pkt_reason == CS_RESET) {
3051 			EL(ha, "Reset Retry\n");
3052 
3053 			/* Set retry status. */
3054 			sp->flags |= SRB_RETRY;
3055 
3056 		} else if (CFG_IST(ha, CFG_CTRL_24258081) &&
3057 		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
3058 			cnt = ddi_get32(ha->hba_buf.acc_handle,
3059 			    &pkt24->resp_byte_count);
3060 			if (cnt < sizeof (fc_ct_header_t)) {
3061 				EL(ha, "Data underrun\n");
3062 			} else {
3063 				sp->pkt->pkt_reason = CS_COMPLETE;
3064 			}
3065 
3066 		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
3067 			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
3068 		}
3069 
3070 		if (sp->pkt->pkt_reason == CS_COMPLETE) {
3071 			/*EMPTY*/
3072 			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
3073 			    ha->instance, sp->pkt->pkt_cmd[8],
3074 			    sp->pkt->pkt_cmd[9]);
3075 			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3076 		}
3077 
3078 		/* For nameserver restore command, management change header. */
3079 		if ((sp->flags & SRB_RETRY) == 0) {
3080 			tq->d_id.b24 == 0xfffffc ?
3081 			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3082 			    sp->pkt->pkt_cmd, B_TRUE) :
3083 			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3084 			    sp->pkt->pkt_resp, B_TRUE);
3085 		}
3086 
3087 		/* Set completed status. */
3088 		sp->flags |= SRB_ISP_COMPLETED;
3089 
3090 		/* Place command on done queue. */
3091 		ql_add_link_b(done_q, &sp->cmd);
3092 
3093 	}
3094 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3095 }
3096 
3097 /*
3098  * ql_report_id_entry
 *	Processes a received Report ID Acquisition entry (virtual port
 *	state notification).
3100  *
3101  * Input:
3102  *	ha:		adapter state pointer.
3103  *	pkt:		entry pointer.
3104  *	done_q:		done queue pointer.
3105  *	set_flags:	task daemon flags to set.
3106  *	reset_flags:	task daemon flags to reset.
3107  *
3108  * Context:
3109  *	Interrupt or Kernel context, no mailbox commands allowed.
3110  */
3111 /* ARGSUSED */
3112 static void
3113 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3114     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3115 {
3116 	ql_adapter_state_t	*vha;
3117 
3118 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3119 
3120 	EL(ha, "format=%d, vp=%d, status=%d\n",
3121 	    pkt->format, pkt->vp_index, pkt->status);
3122 
3123 	if (pkt->format == 1) {
3124 		/* Locate port state structure. */
3125 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3126 			if (vha->vp_index == pkt->vp_index) {
3127 				break;
3128 			}
3129 		}
3130 		if (vha != NULL && vha->vp_index != 0 &&
3131 		    (pkt->status == CS_COMPLETE ||
3132 		    pkt->status == CS_PORT_ID_CHANGE)) {
3133 			*set_flags |= LOOP_RESYNC_NEEDED;
3134 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3135 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3136 			TASK_DAEMON_LOCK(ha);
3137 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3138 			vha->task_daemon_flags &= ~LOOP_DOWN;
3139 			TASK_DAEMON_UNLOCK(ha);
3140 		}
3141 	}
3142 
3143 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3144 }
3145 
3146 /*
 * ql_els_passthru_entry
3148  *	Processes received ELS Pass-Through entry.
3149  *
3150  * Input:
3151  *	ha:		adapter state pointer.
3152  *	pkt23:		entry pointer.
3153  *	done_q:		done queue pointer.
3154  *	set_flags:	task daemon flags to set.
3155  *	reset_flags:	task daemon flags to reset.
3156  *
3157  * Context:
3158  *	Interrupt or Kernel context, no mailbox commands allowed.
3159  */
3160 /* ARGSUSED */
3161 static void
3162 ql_els_passthru_entry(