1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77     uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79     ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83     ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
91     uint32_t *, uint32_t *);
92 static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
93 
94 /*
95  * ql_isr
96  *	Process all INTX intr types.
97  *
98  * Input:
99  *	arg1:	adapter state pointer.
100  *
101  * Returns:
102  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
103  *
104  * Context:
105  *	Interrupt or Kernel context, no mailbox commands allowed.
106  */
107 /* ARGSUSED */
108 uint_t
109 ql_isr(caddr_t arg1)
110 {
111 	return (ql_isr_aif(arg1, 0));
112 }
113 
114 /*
115  * ql_isr_default
116  *	Process unknown/unvectored intr types
117  *
118  * Input:
119  *	arg1:	adapter state pointer.
120  *	arg2:	interrupt vector.
121  *
122  * Returns:
123  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
124  *
125  * Context:
126  *	Interrupt or Kernel context, no mailbox commands allowed.
127  */
128 /* ARGSUSED */
129 uint_t
130 ql_isr_default(caddr_t arg1, caddr_t arg2)
131 {
132 	ql_adapter_state_t	*ha = (void *)arg1;
133 
134 	EL(ha, "isr_default called: idx=%x\n", arg2);
135 	return (ql_isr_aif(arg1, arg2));
136 }
137 
138 /*
139  * ql_isr_aif
140  *	Process mailbox and I/O command completions.
141  *
142  * Input:
143  *	arg:	adapter state pointer.
144  *	intvec:	interrupt vector.
145  *
146  * Returns:
147  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
148  *
149  * Context:
150  *	Interrupt or Kernel context, no mailbox commands allowed.
151  */
152 /* ARGSUSED */
153 uint_t
154 ql_isr_aif(caddr_t arg, caddr_t intvec)
155 {
156 	uint16_t		mbx;
157 	uint32_t		stat;
158 	ql_adapter_state_t	*ha = (void *)arg;
159 	uint32_t		set_flags = 0;
160 	uint32_t		reset_flags = 0;
161 	ql_head_t		isr_done_q = {NULL, NULL};
162 	uint_t			rval = DDI_INTR_UNCLAIMED;
163 	int			spurious_intr = 0;
164 	boolean_t		intr = B_FALSE, daemon = B_FALSE;
165 	int			intr_loop = 4;
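	/*
	 * intr_loop lets the first few passes defer the RISC interrupt
	 * clear to the individual handlers; once it counts down to zero
	 * the loop clears the interrupt itself on every pass.
	 */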
166 
167 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
168 
169 	QL_PM_LOCK(ha);
170 	if (ha->power_level != PM_LEVEL_D0) {
171 		/*
172 		 * Looks like we are about to go down soon, exit early.
173 		 */
174 		QL_PM_UNLOCK(ha);
175 		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
176 		return (DDI_INTR_UNCLAIMED);
177 	}
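	/*
	 * Bump the busy count under the PM lock so the adapter is not
	 * powered down while this interrupt is being serviced.
	 */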
178 	ha->busy++;
179 	QL_PM_UNLOCK(ha);
180 
181 	/* Acquire interrupt lock. */
182 	INTR_LOCK(ha);
183 
184 	if (CFG_IST(ha, CFG_CTRL_2200)) {
185 		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
186 			/* Reset idle timer. */
187 			ha->idle_timer = 0;
188 			rval = DDI_INTR_CLAIMED;
189 			if (intr_loop) {
190 				intr_loop--;
191 			}
192 
193 			/* Special Fast Post 2200. */
194 			stat = 0;
195 			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
196 			    ha->flags & ONLINE) {
197 				ql_srb_t	*sp;
198 
199 				mbx = RD16_IO_REG(ha, mailbox[23]);
200 
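				/*
				 * Mailbox 23 fast-post layout, as
				 * decoded below: bits 1:0 give the
				 * completion type; shifting off the
				 * low nibble leaves the low 12 bits
				 * of the IOCB handle, whose index
				 * portion selects the outstanding
				 * command slot.
				 */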
201 				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
202 					/* Release mailbox registers. */
203 					WRT16_IO_REG(ha, semaphore, 0);
204 
205 					if (intr_loop) {
206 						WRT16_IO_REG(ha, hccr,
207 						    HC_CLR_RISC_INT);
208 					}
209 
210 					/* Get handle. */
211 					mbx >>= 4;
212 					stat = mbx & OSC_INDEX_MASK;
213 
214 					/* Validate handle. */
215 					sp = stat < MAX_OUTSTANDING_COMMANDS ?
216 					    ha->outstanding_cmds[stat] : NULL;
217 
218 					if (sp != NULL && (sp->handle & 0xfff)
219 					    == mbx) {
220 						ha->outstanding_cmds[stat] =
221 						    NULL;
222 						sp->handle = 0;
223 						sp->flags &=
224 						    ~SRB_IN_TOKEN_ARRAY;
225 
226 						/* Set completed status. */
227 						sp->flags |= SRB_ISP_COMPLETED;
228 
229 						/* Set completion status */
230 						sp->pkt->pkt_reason =
231 						    CS_COMPLETE;
232 
233 						ql_fast_fcp_post(sp);
234 					} else if (mbx !=
235 					    (QL_FCA_BRAND & 0xfff)) {
236 						if (sp == NULL) {
237 							EL(ha, "unknown IOCB"
238 							    " handle=%xh\n",
239 							    mbx);
240 						} else {
241 							EL(ha, "mismatch IOCB"
242 							    " handle pkt=%xh, "
243 							    "sp=%xh\n", mbx,
244 							    sp->handle & 0xfff);
245 						}
246 
247 						(void) ql_binary_fw_dump(ha,
248 						    FALSE);
249 
250 						if (!(ha->task_daemon_flags &
251 						    (ISP_ABORT_NEEDED |
252 						    ABORT_ISP_ACTIVE))) {
253 							EL(ha, "ISP Invalid "
254 							    "handle, "
255 							    "isp_abort_needed"
256 							    "\n");
257 							set_flags |=
258 							    ISP_ABORT_NEEDED;
259 						}
260 					}
261 				}
262 			}
263 
264 			if (stat == 0) {
265 				/* Check for mailbox interrupt. */
266 				mbx = RD16_IO_REG(ha, semaphore);
267 				if (mbx & BIT_0) {
268 					/* Release mailbox registers. */
269 					WRT16_IO_REG(ha, semaphore, 0);
270 
271 					/* Get mailbox data. */
272 					mbx = RD16_IO_REG(ha, mailbox[0]);
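					/*
					 * Per the range checks below,
					 * 4000h-7fffh is a mailbox
					 * command completion status and
					 * 8000h-bfffh is an asynchronous
					 * event code.
					 */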
273 					if (mbx > 0x3fff && mbx < 0x8000) {
274 						ql_mbx_completion(ha, mbx,
275 						    &set_flags, &reset_flags,
276 						    intr_loop);
277 					} else if (mbx > 0x7fff &&
278 					    mbx < 0xc000) {
279 						ql_async_event(ha, mbx,
280 						    &isr_done_q, &set_flags,
281 						    &reset_flags, intr_loop);
282 					} else {
283 						EL(ha, "UNKNOWN interrupt "
284 						    "type\n");
285 						intr = B_TRUE;
286 					}
287 				} else {
288 					ha->isp_rsp_index = RD16_IO_REG(ha,
289 					    resp_in);
290 
291 					if (ha->isp_rsp_index !=
292 					    ha->rsp_ring_index) {
293 						ql_response_pkt(ha,
294 						    &isr_done_q, &set_flags,
295 						    &reset_flags, intr_loop);
296 					} else if (++spurious_intr ==
297 					    MAX_SPURIOUS_INTR) {
298 						/*
299 						 * Process excessive
300 						 * spurious interrupts
301 						 */
302 						ql_spurious_intr(ha,
303 						    intr_loop);
304 						EL(ha, "excessive spurious "
305 						    "interrupts, "
306 						    "isp_abort_needed\n");
307 						set_flags |= ISP_ABORT_NEEDED;
308 					} else {
309 						intr = B_TRUE;
310 					}
311 				}
312 			}
313 
314 			/* Clear RISC interrupt */
315 			if (intr || intr_loop == 0) {
316 				intr = B_FALSE;
317 				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
318 			}
319 
320 			if (set_flags != 0 || reset_flags != 0) {
321 				TASK_DAEMON_LOCK(ha);
322 				ha->task_daemon_flags |= set_flags;
323 				ha->task_daemon_flags &= ~reset_flags;
324 				TASK_DAEMON_UNLOCK(ha);
325 				set_flags = 0;
326 				reset_flags = 0;
327 				daemon = B_TRUE;
328 			}
329 		}
330 	} else {
331 		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & RH_RISC_INT) {
332 			/* Capture FW defined interrupt info */
333 			mbx = MSW(stat);
334 
335 			/* Reset idle timer. */
336 			ha->idle_timer = 0;
337 			rval = DDI_INTR_CLAIMED;
338 			if (intr_loop) {
339 				intr_loop--;
340 			}
341 
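			/*
			 * The low 9 bits of the RISC-to-host status select
			 * the interrupt type; the firmware data captured
			 * in mbx above is interpreted per type below.
			 */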
342 			switch (stat & 0x1ff) {
343 			case ROM_MBX_SUCCESS:
344 			case ROM_MBX_ERR:
345 				ql_mbx_completion(ha, mbx, &set_flags,
346 				    &reset_flags, intr_loop);
347 
348 				/* Release mailbox registers. */
349 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
350 					WRT16_IO_REG(ha, semaphore, 0);
351 				}
352 				break;
353 
354 			case MBX_SUCCESS:
355 			case MBX_ERR:
356 				/* Sun FW, Release mailbox registers. */
357 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
358 					WRT16_IO_REG(ha, semaphore, 0);
359 				}
360 				ql_mbx_completion(ha, mbx, &set_flags,
361 				    &reset_flags, intr_loop);
362 				break;
363 
364 			case ASYNC_EVENT:
365 				/* Sun FW, Release mailbox registers. */
366 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
367 					WRT16_IO_REG(ha, semaphore, 0);
368 				}
369 				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
370 				    &set_flags, &reset_flags, intr_loop);
371 				break;
372 
373 			case RESP_UPDATE:
374 				if (mbx != ha->rsp_ring_index) {
375 					ha->isp_rsp_index = mbx;
376 					ql_response_pkt(ha, &isr_done_q,
377 					    &set_flags, &reset_flags,
378 					    intr_loop);
379 				} else if (++spurious_intr ==
380 				    MAX_SPURIOUS_INTR) {
381 					/* Process excessive spurious intr. */
382 					ql_spurious_intr(ha, intr_loop);
383 					EL(ha, "excessive spurious "
384 					    "interrupts, isp_abort_needed\n");
385 					set_flags |= ISP_ABORT_NEEDED;
386 				} else {
387 					intr = B_TRUE;
388 				}
389 				break;
390 
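			/*
			 * Each fast-post case below rewrites the low word
			 * of stat with the equivalent async event code
			 * while keeping the firmware data in the high
			 * word, then lets ql_async_event() complete the
			 * I/O.
			 */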
391 			case SCSI_FAST_POST_16:
392 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
393 				ql_async_event(ha, stat, &isr_done_q,
394 				    &set_flags, &reset_flags, intr_loop);
395 				break;
396 
397 			case SCSI_FAST_POST_32:
398 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
399 				ql_async_event(ha, stat, &isr_done_q,
400 				    &set_flags, &reset_flags, intr_loop);
401 				break;
402 
403 			case CTIO_FAST_POST:
404 				stat = (stat & 0xffff0000) |
405 				    MBA_CTIO_COMPLETION;
406 				ql_async_event(ha, stat, &isr_done_q,
407 				    &set_flags, &reset_flags, intr_loop);
408 				break;
409 
410 			case IP_FAST_POST_XMT:
411 				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
412 				ql_async_event(ha, stat, &isr_done_q,
413 				    &set_flags, &reset_flags, intr_loop);
414 				break;
415 
416 			case IP_FAST_POST_RCV:
417 				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
418 				ql_async_event(ha, stat, &isr_done_q,
419 				    &set_flags, &reset_flags, intr_loop);
420 				break;
421 
422 			case IP_FAST_POST_BRD:
423 				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
424 				ql_async_event(ha, stat, &isr_done_q,
425 				    &set_flags, &reset_flags, intr_loop);
426 				break;
427 
428 			case IP_FAST_POST_RCV_ALN:
429 				stat = (stat & 0xffff0000) |
430 				    MBA_IP_HDR_DATA_SPLIT;
431 				ql_async_event(ha, stat, &isr_done_q,
432 				    &set_flags, &reset_flags, intr_loop);
433 				break;
434 
435 			case ATIO_UPDATE:
436 				EL(ha, "unsupported ATIO queue update"
437 				    " interrupt, status=%xh\n", stat);
438 				intr = B_TRUE;
439 				break;
440 
441 			case ATIO_RESP_UPDATE:
442 				EL(ha, "unsupported ATIO response queue "
443 				    "update interrupt, status=%xh\n", stat);
444 				intr = B_TRUE;
445 				break;
446 
447 			default:
448 				ql_handle_uncommon_risc_intr(ha, stat,
449 				    &set_flags);
450 				intr = B_TRUE;
451 				break;
452 			}
453 
454 			/* Clear RISC interrupt */
455 			if (intr || intr_loop == 0) {
456 				intr = B_FALSE;
457 				CFG_IST(ha, CFG_CTRL_242581) ?
458 				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
459 				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
460 			}
461 
462 			if (set_flags != 0 || reset_flags != 0) {
463 				TASK_DAEMON_LOCK(ha);
464 				ha->task_daemon_flags |= set_flags;
465 				ha->task_daemon_flags &= ~reset_flags;
466 				TASK_DAEMON_UNLOCK(ha);
467 				set_flags = 0;
468 				reset_flags = 0;
469 				daemon = B_TRUE;
470 			}
471 
472 			if (ha->flags & PARITY_ERROR) {
473 				EL(ha, "parity/pause exit\n");
474 				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
475 				break;
476 			}
477 		}
478 	}
479 
480 	/* Process claimed interrupts during polls. */
481 	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
482 		ha->intr_claimed = B_FALSE;
483 		rval = DDI_INTR_CLAIMED;
484 	}
485 
486 	/* Release interrupt lock. */
487 	INTR_UNLOCK(ha);
488 
489 	if (daemon) {
490 		ql_awaken_task_daemon(ha, NULL, 0, 0);
491 	}
492 
493 	if (isr_done_q.first != NULL) {
494 		ql_done(isr_done_q.first);
495 	}
496 
497 	if (rval == DDI_INTR_CLAIMED) {
498 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
499 		ha->xioctl->TotalInterrupts++;
500 	} else {
501 		/*EMPTY*/
502 		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
503 		    ha->instance);
504 	}
505 
506 	QL_PM_LOCK(ha);
507 	ha->busy--;
508 	QL_PM_UNLOCK(ha);
509 
510 	return (rval);
511 }
512 
513 /*
514  * ql_handle_uncommon_risc_intr
515  *	Handle an uncommon RISC interrupt.
516  *
517  * Input:
518  *	ha:		adapter state pointer.
519  *	stat:		interrupt status
520  *
521  * Context:
522  *	Interrupt or Kernel context, no mailbox commands allowed.
523  */
524 static void
525 ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
526     uint32_t *set_flags)
527 {
528 	uint16_t	hccr_reg;
529 
530 	hccr_reg = RD16_IO_REG(ha, hccr);
531 
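	/*
	 * A paused RISC (RH_RISC_PAUSED) or any of the hccr error bits
	 * checked below is treated as an internal parity/pause error;
	 * anything else is logged as an unknown interrupt status.
	 */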
532 	if (stat & RH_RISC_PAUSED ||
533 	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
534 
535 		ADAPTER_STATE_LOCK(ha);
536 		ha->flags |= PARITY_ERROR;
537 		ADAPTER_STATE_UNLOCK(ha);
538 
539 		if (ha->parity_pause_errors == 0 ||
540 		    ha->parity_hccr_err != hccr_reg ||
541 		    ha->parity_stat_err != stat) {
542 			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
543 			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
544 			    ha->instance, hccr_reg, stat,
545 			    ha->parity_pause_errors);
546 			ha->parity_hccr_err = hccr_reg;
547 			ha->parity_stat_err = stat;
548 		}
549 
550 		EL(ha, "parity/pause error, isp_abort_needed\n");
551 
552 		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
553 			ql_reset_chip(ha);
554 		}
555 
556 		if (ha->parity_pause_errors == 0) {
557 			(void) ql_flash_errlog(ha, FLASH_ERRLOG_PARITY_ERR,
558 			    0, MSW(stat), LSW(stat));
559 		}
560 
561 		if (ha->parity_pause_errors < 0xffffffff) {
562 			ha->parity_pause_errors++;
563 		}
564 
565 		*set_flags |= ISP_ABORT_NEEDED;
566 
567 		/* Disable ISP interrupts. */
568 		WRT16_IO_REG(ha, ictrl, 0);
569 		ADAPTER_STATE_LOCK(ha);
570 		ha->flags &= ~INTERRUPTS_ENABLED;
571 		ADAPTER_STATE_UNLOCK(ha);
572 	} else {
573 		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
574 		    stat, hccr_reg);
575 	}
576 }
577 
578 /*
579  * ql_spurious_intr
580  *	Inform Solaris of spurious interrupts.
581  *
582  * Input:
583  *	ha:		adapter state pointer.
584  *	intr_clr:	early interrupt clear
585  *
586  * Context:
587  *	Interrupt or Kernel context, no mailbox commands allowed.
588  */
589 static void
590 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
591 {
592 	ddi_devstate_t	state;
593 
594 	EL(ha, "Spurious interrupt\n");
595 
596 	/* Disable ISP interrupts. */
597 	WRT16_IO_REG(ha, ictrl, 0);
598 	ADAPTER_STATE_LOCK(ha);
599 	ha->flags &= ~INTERRUPTS_ENABLED;
600 	ADAPTER_STATE_UNLOCK(ha);
601 
602 	/* Clear RISC interrupt */
603 	if (intr_clr) {
604 		CFG_IST(ha, CFG_CTRL_242581) ?
605 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
606 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
607 	}
608 
609 	state = ddi_get_devstate(ha->dip);
610 	if (state == DDI_DEVSTATE_UP) {
611 		/*EMPTY*/
612 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
613 		    DDI_DEVICE_FAULT, "spurious interrupts");
614 	}
615 }
616 
617 /*
618  * ql_mbx_completion
619  *	Processes mailbox completions.
620  *
621  * Input:
622  *	ha:		adapter state pointer.
623  *	mb0:		Mailbox 0 contents.
624  *	set_flags:	task daemon flags to set.
625  *	reset_flags:	task daemon flags to reset.
626  *	intr_clr:	early interrupt clear
627  *
628  * Context:
629  *	Interrupt context.
630  */
631 /* ARGSUSED */
632 static void
633 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
634     uint32_t *reset_flags, int intr_clr)
635 {
636 	uint32_t	index;
637 	uint16_t	cnt;
638 
639 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
640 
641 	/* Load return mailbox registers. */
642 	MBX_REGISTER_LOCK(ha);
643 
644 	if (ha->mcp != NULL) {
645 		ha->mcp->mb[0] = mb0;
646 		index = ha->mcp->in_mb & ~MBX_0;
647 
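		/*
		 * in_mb is a bitmask of the mailbox registers the caller
		 * expects back; mailbox 0 was stored above, so walk the
		 * remaining bits and copy each requested register.
		 */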
648 		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
649 			index >>= 1;
650 			if (index & MBX_0) {
651 				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
652 				    mailbox[cnt]);
653 			}
654 		}
655 
656 	} else {
657 		EL(ha, "mcp == NULL\n");
658 	}
659 
660 	if (intr_clr) {
661 		/* Clear RISC interrupt. */
662 		CFG_IST(ha, CFG_CTRL_242581) ?
663 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
664 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
665 	}
666 
667 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
668 	if (ha->flags & INTERRUPTS_ENABLED) {
669 		cv_broadcast(&ha->cv_mbx_intr);
670 	}
671 
672 	MBX_REGISTER_UNLOCK(ha);
673 
674 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
675 }
676 
677 /*
678  * ql_async_event
679  *	Processes asynchronous events.
680  *
681  * Input:
682  *	ha:		adapter state pointer.
683  *	mbx:		Mailbox 0 register.
684  *	done_q:		head pointer to done queue.
685  *	set_flags:	task daemon flags to set.
686  *	reset_flags:	task daemon flags to reset.
687  *	intr_clr:	early interrupt clear
688  *
689  * Context:
690  *	Interrupt or Kernel context, no mailbox commands allowed.
691  */
692 static void
693 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
694     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
695 {
696 	uint32_t		handle;
697 	uint32_t		index;
698 	uint16_t		cnt;
699 	uint16_t		mb[MAX_MBOX_COUNT];
700 	ql_srb_t		*sp;
701 	port_id_t		s_id;
702 	ql_tgt_t		*tq;
703 	boolean_t		intr = B_TRUE;
704 	ql_adapter_state_t	*vha;
705 
706 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
707 
708 	/* Setup to process fast completion. */
709 	mb[0] = LSW(mbx);
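	/*
	 * The switch below normalizes the fast-post variants: the IOCB
	 * handle is pulled from the high word of mbx and/or mailbox
	 * registers 1-2, and the event code is folded into
	 * MBA_SCSI_COMPLETION so the handler switch that follows can
	 * treat them uniformly.
	 */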
710 	switch (mb[0]) {
711 	case MBA_SCSI_COMPLETION:
712 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
713 		    RD16_IO_REG(ha, mailbox[2]));
714 		break;
715 
716 	case MBA_CMPLT_1_16BIT:
717 		handle = MSW(mbx);
718 		mb[0] = MBA_SCSI_COMPLETION;
719 		break;
720 
721 	case MBA_CMPLT_1_32BIT:
722 		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
723 		mb[0] = MBA_SCSI_COMPLETION;
724 		break;
725 
726 	case MBA_CTIO_COMPLETION:
727 	case MBA_IP_COMPLETION:
728 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
729 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
730 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
731 		mb[0] = MBA_SCSI_COMPLETION;
732 		break;
733 
734 	default:
735 		break;
736 	}
737 
738 	/* Handle asynchronous event */
739 	switch (mb[0]) {
740 	case MBA_SCSI_COMPLETION:
741 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
742 		    ha->instance);
743 
744 		if (intr_clr) {
745 			/* Clear RISC interrupt */
746 			CFG_IST(ha, CFG_CTRL_242581) ?
747 			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
748 			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
749 			intr = B_FALSE;
750 		}
751 
752 		if ((ha->flags & ONLINE) == 0) {
753 			break;
754 		}
755 
756 		/* Get handle. */
757 		index = handle & OSC_INDEX_MASK;
758 
759 		/* Validate handle. */
760 		sp = index < MAX_OUTSTANDING_COMMANDS ?
761 		    ha->outstanding_cmds[index] : NULL;
762 
763 		if (sp != NULL && sp->handle == handle) {
764 			ha->outstanding_cmds[index] = NULL;
765 			sp->handle = 0;
766 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
767 
768 			/* Set completed status. */
769 			sp->flags |= SRB_ISP_COMPLETED;
770 
771 			/* Set completion status */
772 			sp->pkt->pkt_reason = CS_COMPLETE;
773 
774 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
775 				/* Place block on done queue */
776 				ql_add_link_b(done_q, &sp->cmd);
777 			} else {
778 				ql_fast_fcp_post(sp);
779 			}
780 		} else if (handle != QL_FCA_BRAND) {
781 			if (sp == NULL) {
782 				EL(ha, "%xh unknown IOCB handle=%xh\n",
783 				    mb[0], handle);
784 			} else {
785 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
786 				    "sp=%xh\n", mb[0], handle, sp->handle);
787 			}
788 
789 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
790 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
791 			    RD16_IO_REG(ha, mailbox[1]),
792 			    RD16_IO_REG(ha, mailbox[2]),
793 			    RD16_IO_REG(ha, mailbox[3]),
794 			    RD16_IO_REG(ha, mailbox[6]),
795 			    RD16_IO_REG(ha, mailbox[7]));
796 
797 			(void) ql_binary_fw_dump(ha, FALSE);
798 
799 			if (!(ha->task_daemon_flags &
800 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
801 				EL(ha, "%xh ISP Invalid handle, "
802 				    "isp_abort_needed\n", mb[0]);
803 				*set_flags |= ISP_ABORT_NEEDED;
804 			}
805 		}
806 		break;
807 
808 	case MBA_RESET:		/* Reset */
809 		EL(ha, "%xh Reset received\n", mb[0]);
810 		*set_flags |= RESET_MARKER_NEEDED;
811 		break;
812 
813 	case MBA_SYSTEM_ERR:		/* System Error */
814 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
815 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
816 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
817 		mb[7] = RD16_IO_REG(ha, mailbox[7]);
818 
819 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
820 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
821 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
822 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
823 		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
824 		    RD16_IO_REG(ha, mailbox[6]), mb[7],
825 		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
826 		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
827 		    RD16_IO_REG(ha, mailbox[12]));
828 
829 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
830 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
831 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
832 		    mb[0], RD16_IO_REG(ha, mailbox[13]),
833 		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
834 		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
835 		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
836 		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
837 		    RD16_IO_REG(ha, mailbox[22]),
838 		    RD16_IO_REG(ha, mailbox[23]));
839 
840 		if (ha->reg_off->mbox_cnt > 24) {
841 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
842 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
843 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
844 			    RD16_IO_REG(ha, mailbox[24]),
845 			    RD16_IO_REG(ha, mailbox[25]),
846 			    RD16_IO_REG(ha, mailbox[26]),
847 			    RD16_IO_REG(ha, mailbox[27]),
848 			    RD16_IO_REG(ha, mailbox[28]),
849 			    RD16_IO_REG(ha, mailbox[29]),
850 			    RD16_IO_REG(ha, mailbox[30]),
851 			    RD16_IO_REG(ha, mailbox[31]));
852 		}
853 
854 		(void) ql_binary_fw_dump(ha, FALSE);
855 
856 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
857 		    mb[2], mb[3]);
858 
859 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
860 			ADAPTER_STATE_LOCK(ha);
861 			ha->flags |= MPI_RESET_NEEDED;
862 			ADAPTER_STATE_UNLOCK(ha);
863 		}
864 
865 		*set_flags |= ISP_ABORT_NEEDED;
866 		ha->xioctl->ControllerErrorCount++;
867 		break;
868 
869 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
870 		EL(ha, "%xh Request Transfer Error received, "
871 		    "isp_abort_needed\n", mb[0]);
872 
873 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
874 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
875 		    RD16_IO_REG(ha, mailbox[3]));
876 
877 		*set_flags |= ISP_ABORT_NEEDED;
878 		ha->xioctl->ControllerErrorCount++;
879 		break;
880 
881 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
882 		EL(ha, "%xh Response Transfer Error received,"
883 		    " isp_abort_needed\n", mb[0]);
884 
885 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
886 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
887 		    RD16_IO_REG(ha, mailbox[3]));
888 
889 		*set_flags |= ISP_ABORT_NEEDED;
890 		ha->xioctl->ControllerErrorCount++;
891 		break;
892 
893 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
894 		EL(ha, "%xh Request Queue Wake-up received\n",
895 		    mb[0]);
896 		break;
897 
898 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
899 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
900 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
901 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
902 
903 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
904 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
905 
906 		switch (mb[1]) {
907 		case MLA_LOGIN_OPERATIONAL_FW:
908 			ADAPTER_STATE_LOCK(ha);
909 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
910 			ADAPTER_STATE_UNLOCK(ha);
911 			break;
912 		case MLA_PANIC_RECOVERY:
913 		case MLA_LOGIN_DIAGNOSTIC_FW:
914 		case MLA_LOGIN_GOLDEN_FW:
915 		case MLA_REJECT_RESPONSE:
916 		default:
917 			break;
918 		}
919 		break;
920 
921 	case MBA_LIP_F8:	/* Received a LIP F8. */
922 	case MBA_LIP_RESET:	/* LIP reset occurred. */
923 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
924 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
925 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
926 			    "\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
927 			    RD16_IO_REG(ha, mailbox[2]));
928 		} else {
929 			EL(ha, "%xh LIP received\n", mb[0]);
930 		}
931 
932 		ADAPTER_STATE_LOCK(ha);
933 		ha->flags &= ~POINT_TO_POINT;
934 		ADAPTER_STATE_UNLOCK(ha);
935 
936 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
937 			*set_flags |= LOOP_DOWN;
938 		}
939 		ql_port_state(ha, FC_STATE_OFFLINE,
940 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
941 
942 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
943 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
944 		}
945 
946 		ha->adapter_stats->lip_count++;
947 
948 		/* Update AEN queue. */
949 		ha->xioctl->TotalLipResets++;
950 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
951 			ql_enqueue_aen(ha, mb[0], NULL);
952 		}
953 		break;
954 
955 	case MBA_LOOP_UP:
956 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
957 		    CFG_CTRL_242581))) {
958 			mb[1] = RD16_IO_REG(ha, mailbox[1]);
959 			if (mb[1] == IIDMA_RATE_1GB) {		/* 1GB */
960 				ha->state = FC_PORT_STATE_MASK(
961 				    ha->state) | FC_STATE_1GBIT_SPEED;
962 				index = 1;
963 			} else if (mb[1] == IIDMA_RATE_2GB) {	/* 2GB */
964 				ha->state = FC_PORT_STATE_MASK(
965 				    ha->state) | FC_STATE_2GBIT_SPEED;
966 				index = 2;
967 			} else if (mb[1] == IIDMA_RATE_4GB) {	/* 4GB */
968 				ha->state = FC_PORT_STATE_MASK(
969 				    ha->state) | FC_STATE_4GBIT_SPEED;
970 				index = 4;
971 			} else if (mb[1] == IIDMA_RATE_8GB) {	/* 8GB */
972 				ha->state = FC_PORT_STATE_MASK(
973 				    ha->state) | FC_STATE_8GBIT_SPEED;
974 				index = 8;
975 			} else if (mb[1] == IIDMA_RATE_10GB) {	/* 10GB */
976 				ha->state = FC_PORT_STATE_MASK(
977 				    ha->state) | FC_STATE_10GBIT_SPEED;
978 				index = 10;
979 			} else {
980 				ha->state = FC_PORT_STATE_MASK(
981 				    ha->state);
982 				index = 0;
983 			}
984 		} else {
985 			ha->state = FC_PORT_STATE_MASK(ha->state) |
986 			    FC_STATE_FULL_SPEED;
987 			index = 1;
988 		}
989 
990 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
991 			vha->state = FC_PORT_STATE_MASK(vha->state) |
992 			    FC_PORT_SPEED_MASK(ha->state);
993 		}
994 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
995 
996 		/* Update AEN queue. */
997 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
998 			ql_enqueue_aen(ha, mb[0], NULL);
999 		}
1000 		break;
1001 
1002 	case MBA_LOOP_DOWN:
1003 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1004 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1005 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]),
1006 		    RD16_IO_REG(ha, mailbox[4]));
1007 
1008 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1009 			*set_flags |= LOOP_DOWN;
1010 		}
1011 		ql_port_state(ha, FC_STATE_OFFLINE,
1012 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1013 
1014 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1015 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1016 		}
1017 
1018 		if (CFG_IST(ha, CFG_CTRL_2581)) {
1019 			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
1020 		}
1021 
1022 		/* Update AEN queue. */
1023 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1024 			ql_enqueue_aen(ha, mb[0], NULL);
1025 		}
1026 		break;
1027 
1028 	case MBA_PORT_UPDATE:
1029 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1030 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1031 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1032 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1033 
1034 		/* Locate port state structure. */
1035 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1036 			if (vha->vp_index == LSB(mb[3])) {
1037 				break;
1038 			}
1039 		}
1040 		if (vha == NULL) {
1041 			break;
1042 		}
1043 		/*
1044 		 * In N_Port-to-N_Port topology the firmware provides a
1045 		 * port database entry at loop_id 0x7fe, which we use to
1046 		 * acquire the port's WWPN.
1047 		 */
1048 		if ((mb[1] != 0x7fe) &&
1049 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1050 		    (CFG_IST(ha, CFG_CTRL_242581) &&
1051 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1052 			EL(ha, "%xh Port Database Update, Login/Logout "
1053 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1054 			    mb[0], mb[1], mb[2], mb[3]);
1055 		} else {
1056 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1057 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1058 			    mb[3]);
1059 			*set_flags |= LOOP_RESYNC_NEEDED;
1060 			*set_flags &= ~LOOP_DOWN;
1061 			*reset_flags |= LOOP_DOWN;
1062 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1063 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1064 			TASK_DAEMON_LOCK(ha);
1065 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1066 			vha->task_daemon_flags &= ~LOOP_DOWN;
1067 			TASK_DAEMON_UNLOCK(ha);
1068 			ADAPTER_STATE_LOCK(ha);
1069 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1070 			ADAPTER_STATE_UNLOCK(ha);
1071 		}
1072 
1073 		/* Update AEN queue. */
1074 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1075 			ql_enqueue_aen(ha, mb[0], NULL);
1076 		}
1077 		break;
1078 
1079 	case MBA_RSCN_UPDATE:
1080 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1081 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1082 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1083 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1084 
1085 		/* Locate port state structure. */
1086 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1087 			if (vha->vp_index == LSB(mb[3])) {
1088 				break;
1089 			}
1090 		}
1091 
1092 		if (vha == NULL) {
1093 			break;
1094 		}
1095 
1096 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1097 		    MSB(mb[2]) == vha->d_id.b.area &&
1098 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1099 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1100 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1101 		} else {
1102 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1103 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1104 			if (FC_PORT_STATE_MASK(vha->state) !=
1105 			    FC_STATE_OFFLINE) {
1106 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1107 				TASK_DAEMON_LOCK(ha);
1108 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1109 				TASK_DAEMON_UNLOCK(ha);
1110 				*set_flags |= RSCN_UPDATE_NEEDED;
1111 			}
1112 		}
1113 
1114 		/* Update AEN queue. */
1115 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1116 			ql_enqueue_aen(ha, mb[0], NULL);
1117 		}
1118 		break;
1119 
1120 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1121 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1122 		    RD16_IO_REG(ha, mailbox[1]));
1123 		break;
1124 
1125 	case MBA_IP_RECEIVE:
1126 	case MBA_IP_BROADCAST:
1127 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1128 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1129 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
1130 
1131 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1132 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1133 
1134 		/* Locate device queue. */
1135 		s_id.b.al_pa = LSB(mb[2]);
1136 		s_id.b.area = MSB(mb[2]);
1137 		s_id.b.domain = LSB(mb[1]);
1138 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1139 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1140 			break;
1141 		}
1142 
1143 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1144 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1145 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1146 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1147 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1148 
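		/*
		 * mb[3] carries the total sequence length; dividing it by
		 * the receive buffer size gives the segment count. The
		 * remaining per-segment data is read from mailboxes 10
		 * and up and handed to ql_ub_frame_hdr() below.
		 */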
1149 		tq->ub_sequence_length = mb[3];
1150 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1151 		if (mb[3] % cnt) {
1152 			tq->ub_total_seg_cnt++;
1153 		}
1154 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1155 
1156 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1157 		    index++) {
1158 			mb[index] = RD16_IO_REG(ha, mailbox[index]);
1159 		}
1160 
1161 		tq->ub_seq_id = ++ha->ub_seq_id;
1162 		tq->ub_seq_cnt = 0;
1163 		tq->ub_frame_ro = 0;
1164 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1165 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
1166 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1167 		ha->rcv_dev_q = tq;
1168 
1169 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1170 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1171 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1172 			    QL_SUCCESS) {
1173 				EL(ha, "ql_ub_frame_hdr failed, "
1174 				    "isp_abort_needed\n");
1175 				*set_flags |= ISP_ABORT_NEEDED;
1176 				break;
1177 			}
1178 		}
1179 		break;
1180 
1181 	case MBA_IP_LOW_WATER_MARK:
1182 	case MBA_IP_RCV_BUFFER_EMPTY:
1183 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1184 		    mb[0]);
1185 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1186 		break;
1187 
1188 	case MBA_IP_HDR_DATA_SPLIT:
1189 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1190 		break;
1191 
1192 	case MBA_ERROR_LOGGING_DISABLED:
1193 		EL(ha, "%xh error logging disabled received, "
1194 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]));
1195 		break;
1196 
1197 	case MBA_POINT_TO_POINT:
1198 	/* case MBA_DCBX_COMPLETED: */
1199 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1200 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1201 		} else {
1202 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1203 		}
1204 		ADAPTER_STATE_LOCK(ha);
1205 		ha->flags |= POINT_TO_POINT;
1206 		ADAPTER_STATE_UNLOCK(ha);
1207 		break;
1208 
1209 	case MBA_FCF_CONFIG_ERROR:
1210 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1211 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1212 		break;
1213 
1214 	case MBA_DCBX_PARAM_CHANGED:
1215 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1216 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1217 		break;
1218 
1219 	case MBA_CHG_IN_CONNECTION:
1220 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1221 		if (mb[1] == 2) {
1222 			EL(ha, "%xh Change In Connection received, "
1223 			    "mbx1=%xh\n",  mb[0], mb[1]);
1224 			ADAPTER_STATE_LOCK(ha);
1225 			ha->flags &= ~POINT_TO_POINT;
1226 			ADAPTER_STATE_UNLOCK(ha);
1227 			if (ha->topology & QL_N_PORT) {
1228 				ha->topology = (uint8_t)(ha->topology &
1229 				    ~QL_N_PORT);
1230 				ha->topology = (uint8_t)(ha->topology |
1231 				    QL_NL_PORT);
1232 			}
1233 		} else {
1234 			EL(ha, "%xh Change In Connection received, "
1235 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1236 			*set_flags |= ISP_ABORT_NEEDED;
1237 		}
1238 		break;
1239 
1240 	case MBA_ZIO_UPDATE:
1241 		EL(ha, "%xh ZIO response received\n", mb[0]);
1242 
1243 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1244 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1245 		intr = B_FALSE;
1246 		break;
1247 
1248 	case MBA_PORT_BYPASS_CHANGED:
1249 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1250 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1251 		/*
1252 		 * Event generated when there is a transition on
1253 		 * port bypass of crystal+.
1254 		 * Mailbox 1:	Bit 0 - External.
1255 		 *		Bit 2 - Internal.
1256 		 * When the bit is 0, the port is bypassed.
1257 		 *
1258 		 * For now we will generate a LIP for all cases.
1259 		 */
1260 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1261 		break;
1262 
1263 	case MBA_RECEIVE_ERROR:
1264 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1265 		    mb[0], RD16_IO_REG(ha, mailbox[1]),
1266 		    RD16_IO_REG(ha, mailbox[2]));
1267 		break;
1268 
1269 	case MBA_LS_RJT_SENT:
1270 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1271 		    RD16_IO_REG(ha, mailbox[1]));
1272 		break;
1273 
1274 	case MBA_FW_RESTART_COMP:
1275 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1276 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1277 		break;
1278 
1279 	case MBA_IDC_COMPLETE:
1280 		EL(ha, "%xh Inter-driver communication complete received, "
1281 		    "mbx1=%xh, mbx2=%xh\n", mb[0],
1282 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1283 		break;
1284 
1285 	case MBA_IDC_NOTIFICATION:
1286 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox[1]);
1287 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox[2]);
1288 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox[3]);
1289 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox[4]);
1290 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox[5]);
1291 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox[6]);
1292 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox[7]);
1293 		EL(ha, "%xh Inter-driver communication request notification "
1294 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1295 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1296 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1297 		    ha->idc_mb[6], ha->idc_mb[7]);
1298 		*set_flags |= IDC_ACK_NEEDED;
1299 		break;
1300 
1301 	case MBA_IDC_TIME_EXTENDED:
1302 		EL(ha, "%xh Inter-driver communication time extended received,"
1303 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1304 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1305 		break;
1306 
1307 	default:
1308 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1309 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1310 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
1311 		break;
1312 	}
1313 
1314 	/* Clear RISC interrupt */
1315 	if (intr && intr_clr) {
1316 		CFG_IST(ha, CFG_CTRL_242581) ?
1317 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1318 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1319 	}
1320 
1321 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1322 }
1323 
1324 /*
1325  * ql_fast_fcp_post
1326  *	Fast path for good SCSI I/O completion.
1327  *
1328  * Input:
1329  *	sp:	SRB pointer.
1330  *
1331  * Context:
1332  *	Interrupt or Kernel context, no mailbox commands allowed.
1333  */
1334 static void
1335 ql_fast_fcp_post(ql_srb_t *sp)
1336 {
1337 	ql_adapter_state_t	*ha = sp->ha;
1338 	ql_lun_t		*lq = sp->lun_queue;
1339 	ql_tgt_t		*tq = lq->target_queue;
1340 
1341 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1342 
1343 	/* Acquire device queue lock. */
1344 	DEVICE_QUEUE_LOCK(tq);
1345 
1346 	/* Decrement outstanding commands on device. */
1347 	if (tq->outcnt != 0) {
1348 		tq->outcnt--;
1349 	}
1350 
1351 	if (sp->flags & SRB_FCP_CMD_PKT) {
1352 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1353 			/*
1354 			 * Clear the flag for this LUN so that
1355 			 * untagged commands can be submitted
1356 			 * for it.
1357 			 */
1358 			lq->flags &= ~LQF_UNTAGGED_PENDING;
1359 		}
1360 
1361 		if (lq->lun_outcnt != 0) {
1362 			lq->lun_outcnt--;
1363 		}
1364 	}
1365 
1366 	/* Reset port down retry count on good completion. */
1367 	tq->port_down_retry_count = ha->port_down_retry_count;
1368 	tq->qfull_retry_count = ha->qfull_retry_count;
1369 
1370 	/* Remove command from watchdog queue. */
1371 	if (sp->flags & SRB_WATCHDOG_ENABLED) {
1372 		ql_remove_link(&tq->wdg, &sp->wdg);
1373 		sp->flags &= ~SRB_WATCHDOG_ENABLED;
1374 	}
1375 
1376 	if (lq->cmd.first != NULL) {
1377 		ql_next(ha, lq);
1378 	} else {
1379 		/* Release LU queue specific lock. */
1380 		DEVICE_QUEUE_UNLOCK(tq);
1381 		if (ha->pha->pending_cmds.first != NULL) {
1382 			ql_start_iocb(ha, NULL);
1383 		}
1384 	}
1385 
1386 	/* Sync buffers if required.  */
1387 	if (sp->flags & SRB_MS_PKT) {
1388 		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1389 		    DDI_DMA_SYNC_FORCPU);
1390 	}
1391 
1392 	/* Map ISP completion codes. */
1393 	sp->pkt->pkt_expln = FC_EXPLN_NONE;
1394 	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1395 	sp->pkt->pkt_state = FC_PKT_SUCCESS;
1396 
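	/*
	 * For non-polled commands the interrupt lock is dropped around
	 * the upper-layer completion callback, presumably so the callback
	 * can re-enter the driver without deadlocking on this lock.
	 */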
1397 	/* Now call the pkt completion callback */
1398 	if (sp->flags & SRB_POLL) {
1399 		sp->flags &= ~SRB_POLL;
1400 	} else if (sp->pkt->pkt_comp) {
1401 		INTR_UNLOCK(ha);
1402 		(*sp->pkt->pkt_comp)(sp->pkt);
1403 		INTR_LOCK(ha);
1404 	}
1405 
1406 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1407 }
1408 
1409 /*
1410  * ql_response_pkt
1411  *	Processes response entry.
1412  *
1413  * Input:
1414  *	ha:		adapter state pointer.
1415  *	done_q:		head pointer to done queue.
1416  *	set_flags:	task daemon flags to set.
1417  *	reset_flags:	task daemon flags to reset.
1418  *	intr_clr:	early interrupt clear
1419  *
1420  * Context:
1421  *	Interrupt or Kernel context, no mailbox commands allowed.
1422  */
1423 static void
1424 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1425     uint32_t *reset_flags, int intr_clr)
1426 {
1427 	response_t	*pkt;
1428 	uint32_t	dma_sync_size_1 = 0;
1429 	uint32_t	dma_sync_size_2 = 0;
1430 	int		status = 0;
1431 
1432 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1433 
1434 	/* Clear RISC interrupt */
1435 	if (intr_clr) {
1436 		CFG_IST(ha, CFG_CTRL_242581) ?
1437 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1438 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1439 	}
1440 
1441 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1442 		EL(ha, "index error = %xh, isp_abort_needed\n",
1443 		    ha->isp_rsp_index);
1444 		*set_flags |= ISP_ABORT_NEEDED;
1445 		return;
1446 	}
1447 
1448 	if ((ha->flags & ONLINE) == 0) {
1449 		QL_PRINT_3(CE_CONT, "(%d): not online, done\n", ha->instance);
1450 		return;
1451 	}
1452 
1453 	/* Calculate size of response queue entries to sync. */
1454 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1455 		dma_sync_size_1 = (uint32_t)
1456 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1457 		    RESPONSE_ENTRY_SIZE);
1458 	} else if (ha->isp_rsp_index == 0) {
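		/*
		 * A new in-index of 0 means the firmware is at the wrap
		 * point; sync from the current out-index to the end of
		 * the ring only.
		 */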
1459 		dma_sync_size_1 = (uint32_t)
1460 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1461 		    RESPONSE_ENTRY_SIZE);
1462 	} else {
1463 		/* Responses wrap around the Q */
1464 		dma_sync_size_1 = (uint32_t)
1465 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1466 		    RESPONSE_ENTRY_SIZE);
1467 		dma_sync_size_2 = (uint32_t)
1468 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1469 	}
1470 
1471 	/* Sync DMA buffer. */
1472 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1473 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1474 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1475 	    DDI_DMA_SYNC_FORKERNEL);
1476 	if (dma_sync_size_2) {
1477 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1478 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1479 		    DDI_DMA_SYNC_FORKERNEL);
1480 	}
1481 
1482 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1483 		pkt = ha->response_ring_ptr;
1484 
1485 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1486 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1487 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1488 		    RESPONSE_ENTRY_SIZE);
1489 
1490 		/* Adjust ring index. */
1491 		ha->rsp_ring_index++;
1492 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1493 			ha->rsp_ring_index = 0;
1494 			ha->response_ring_ptr = ha->response_ring_bp;
1495 		} else {
1496 			ha->response_ring_ptr++;
1497 		}
1498 
1499 		/* Process packet. */
1500 		if (ha->status_srb != NULL && pkt->entry_type !=
1501 		    STATUS_CONT_TYPE) {
1502 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1503 			ha->status_srb = NULL;
1504 		}
1505 
1506 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1507 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1508 
1509 		if (pkt->entry_status != 0) {
1510 			ql_error_entry(ha, pkt, done_q, set_flags,
1511 			    reset_flags);
1512 		} else {
1513 			switch (pkt->entry_type) {
1514 			case STATUS_TYPE:
1515 				status |= CFG_IST(ha, CFG_CTRL_242581) ?
1516 				    ql_24xx_status_entry(ha,
1517 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1518 				    reset_flags) :
1519 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1520 				    done_q, set_flags, reset_flags);
1521 				break;
1522 			case STATUS_CONT_TYPE:
1523 				ql_status_cont_entry(ha,
1524 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1525 				    reset_flags);
1526 				break;
1527 			case IP_TYPE:
1528 			case IP_A64_TYPE:
1529 			case IP_CMD_TYPE:
1530 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1531 				    set_flags, reset_flags);
1532 				break;
1533 			case IP_RECEIVE_TYPE:
1534 				ql_ip_rcv_entry(ha,
1535 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1536 				    reset_flags);
1537 				break;
1538 			case IP_RECEIVE_CONT_TYPE:
1539 				ql_ip_rcv_cont_entry(ha,
1540 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1541 				    set_flags, reset_flags);
1542 				break;
1543 			case IP_24XX_RECEIVE_TYPE:
1544 				ql_ip_24xx_rcv_entry(ha,
1545 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1546 				    set_flags, reset_flags);
1547 				break;
1548 			case MS_TYPE:
1549 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1550 				    set_flags, reset_flags);
1551 				break;
1552 			case REPORT_ID_TYPE:
1553 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1554 				    done_q, set_flags, reset_flags);
1555 				break;
1556 			case ELS_PASSTHRU_TYPE:
1557 				ql_els_passthru_entry(ha,
1558 				    (els_passthru_entry_rsp_t *)pkt,
1559 				    done_q, set_flags, reset_flags);
1560 				break;
1561 			case IP_BUF_POOL_TYPE:
1562 			case MARKER_TYPE:
1563 			case VP_MODIFY_TYPE:
1564 			case VP_CONTROL_TYPE:
1565 				break;
1566 			default:
1567 				EL(ha, "Unknown IOCB entry type=%xh\n",
1568 				    pkt->entry_type);
1569 				break;
1570 			}
1571 		}
1572 	}
1573 
1574 	/* Inform RISC of processed responses. */
1575 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1576 
1577 	/* RESET packet received, delay for a possible async event. */
1578 	if (status & BIT_0) {
1579 		drv_usecwait(500000);
1580 	}
1581 
1582 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1583 }
1584 
1585 /*
1586  * ql_error_entry
1587  *	Processes error entry.
1588  *
1589  * Input:
1590  *	ha = adapter state pointer.
1591  *	pkt = entry pointer.
1592  *	done_q = head pointer to done queue.
1593  *	set_flags = task daemon flags to set.
1594  *	reset_flags = task daemon flags to reset.
1595  *
1596  * Context:
1597  *	Interrupt or Kernel context, no mailbox commands allowed.
1598  */
1599 /* ARGSUSED */
1600 static void
1601 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1602     uint32_t *set_flags, uint32_t *reset_flags)
1603 {
1604 	ql_srb_t	*sp;
1605 	uint32_t	index, resp_identifier;
1606 
1607 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1608 		EL(ha, "Aborted command\n");
1609 		return;
1610 	}
1611 
1612 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1613 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1614 
1615 	if (pkt->entry_status & BIT_6) {
1616 		EL(ha, "Request Queue DMA error\n");
1617 	} else if (pkt->entry_status & BIT_5) {
1618 		EL(ha, "Invalid Entry Order\n");
1619 	} else if (pkt->entry_status & BIT_4) {
1620 		EL(ha, "Invalid Entry Count\n");
1621 	} else if (pkt->entry_status & BIT_3) {
1622 		EL(ha, "Invalid Entry Parameter\n");
1623 	} else if (pkt->entry_status & BIT_2) {
1624 		EL(ha, "Invalid Entry Type\n");
1625 	} else if (pkt->entry_status & BIT_1) {
1626 		EL(ha, "Busy\n");
1627 	} else {
1628 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1629 	}
1630 
1631 	/* Validate the response entry handle. */
1632 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1633 	index = resp_identifier & OSC_INDEX_MASK;
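	/*
	 * The low bits of the handle index the outstanding command array;
	 * the full 32-bit value must also match the saved sp->handle
	 * before the completion is trusted.
	 */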
1634 	if (index < MAX_OUTSTANDING_COMMANDS) {
1635 		/* the index seems reasonable */
1636 		sp = ha->outstanding_cmds[index];
1637 		if (sp != NULL) {
1638 			if (sp->handle == resp_identifier) {
1639 				/* Neo, you're the one... */
1640 				ha->outstanding_cmds[index] = NULL;
1641 				sp->handle = 0;
1642 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1643 			} else {
1644 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1645 				    resp_identifier, sp->handle);
1646 				sp = NULL;
1647 				ql_signal_abort(ha, set_flags);
1648 			}
1649 		} else {
1650 			sp = ql_verify_preprocessed_cmd(ha,
1651 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1652 		}
1653 	} else {
1654 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1655 		    index, resp_identifier);
1656 		ql_signal_abort(ha, set_flags);
1657 	}
1658 
1659 	if (sp != NULL) {
1660 		ha->outstanding_cmds[index] = NULL;
1661 		sp->handle = 0;
1662 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1663 
1664 		/* Bad payload or header */
1665 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1666 			/* Bad payload or header, set error status. */
1667 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1668 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1669 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1670 		} else {
1671 			/* Set error status. */
1672 			sp->pkt->pkt_reason = CS_UNKNOWN;
1673 		}
1674 
1675 		/* Set completed status. */
1676 		sp->flags |= SRB_ISP_COMPLETED;
1677 
1678 		/* Place command on done queue. */
1679 		ql_add_link_b(done_q, &sp->cmd);
1680 
1681 	}
1682 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1683 }
1684 
1685 /*
1686  * ql_status_entry
1687  *	Processes received ISP2200-2300 status entry.
1688  *
1689  * Input:
1690  *	ha:		adapter state pointer.
1691  *	pkt:		entry pointer.
1692  *	done_q:		done queue pointer.
1693  *	set_flags:	task daemon flags to set.
1694  *	reset_flags:	task daemon flags to reset.
1695  *
1696  * Returns:
1697  *	BIT_0 = CS_RESET status received.
1698  *
1699  * Context:
1700  *	Interrupt or Kernel context, no mailbox commands allowed.
1701  */
1702 /* ARGSUSED */
1703 static int
1704 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1705     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1706 {
1707 	ql_srb_t		*sp;
1708 	uint32_t		index, resp_identifier;
1709 	uint16_t		comp_status;
1710 	int			rval = 0;
1711 
1712 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1713 
1714 	/* Validate the response entry handle. */
1715 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1716 	index = resp_identifier & OSC_INDEX_MASK;
1717 	if (index < MAX_OUTSTANDING_COMMANDS) {
1718 		/* the index seems reasonable */
1719 		sp = ha->outstanding_cmds[index];
1720 		if (sp != NULL) {
1721 			if (sp->handle == resp_identifier) {
1722 				/* Neo, you're the one... */
1723 				ha->outstanding_cmds[index] = NULL;
1724 				sp->handle = 0;
1725 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1726 			} else {
1727 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1728 				    resp_identifier, sp->handle);
1729 				sp = NULL;
1730 				ql_signal_abort(ha, set_flags);
1731 			}
1732 		} else {
1733 			sp = ql_verify_preprocessed_cmd(ha,
1734 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1735 		}
1736 	} else {
1737 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1738 		    index, resp_identifier);
1739 		ql_signal_abort(ha, set_flags);
1740 	}
1741 
1742 	if (sp != NULL) {
1743 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1744 		    &pkt->comp_status);
1745 
1746 		/*
1747 		 * We don't care about SCSI QFULLs.
1748 		 */
1749 		if (comp_status == CS_QUEUE_FULL) {
1750 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1751 			    sp->lun_queue->target_queue->d_id.b24,
1752 			    sp->lun_queue->lun_no);
1753 			comp_status = CS_COMPLETE;
1754 		}
1755 
1756 		/*
1757 		 * for SCSI QFULLs. Make it transport complete.
1758 		 * for scsi qfulls. Make it transport complete.
1759 		 */
1760 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1761 		    (comp_status == CS_DATA_UNDERRUN) &&
1762 		    (pkt->scsi_status_l != 0)) {
1763 			comp_status = CS_COMPLETE;
1764 		}
1765 
1766 		/*
1767 		 * Workaround T3 issue where we do not get any data xferred
1768 		 * but get back a good status.
1769 		 */
1770 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1771 		    comp_status == CS_COMPLETE &&
1772 		    pkt->scsi_status_l == 0 &&
1773 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1774 		    pkt->residual_length == 0 &&
1775 		    sp->fcp &&
1776 		    sp->fcp->fcp_data_len != 0 &&
1777 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1778 		    SF_DATA_OUT) {
1779 			comp_status = CS_ABORTED;
1780 		}
1781 
1782 		if (sp->flags & SRB_MS_PKT) {
1783 			/*
1784 			 * Ideally this should never happen, but a firmware
1785 			 * bug causes an MS IOCB received with invalid
1786 			 * parameters to be returned as a status entry
1787 			 * rather than as an MS entry type.
1788 			 */
1789 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1790 			    set_flags, reset_flags);
1791 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1792 			    ha->instance);
1793 			return (0);
1794 		}
1795 
1796 		ha->outstanding_cmds[index] = NULL;
1797 		sp->handle = 0;
1798 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1799 
1800 		/*
1801 		 * Fast path to good SCSI I/O completion
1802 		 */
1803 		if ((comp_status == CS_COMPLETE) &&
1804 		    (!pkt->scsi_status_l) &&
1805 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1806 			/* Set completed status. */
1807 			sp->flags |= SRB_ISP_COMPLETED;
1808 			sp->pkt->pkt_reason = comp_status;
1809 			ql_fast_fcp_post(sp);
1810 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1811 			    ha->instance);
1812 			return (0);
1813 		}
1814 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1815 		    reset_flags);
1816 	}
1817 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1818 
1819 	return (rval);
1820 }
1821 
1822 /*
1823  * ql_24xx_status_entry
1824  *	Processes received ISP24xx status entry.
1825  *
1826  * Input:
1827  *	ha:		adapter state pointer.
1828  *	pkt:		entry pointer.
1829  *	done_q:		done queue pointer.
1830  *	set_flags:	task daemon flags to set.
1831  *	reset_flags:	task daemon flags to reset.
1832  *
1833  * Returns:
1834  *	BIT_0 = CS_RESET status received.
1835  *
1836  * Context:
1837  *	Interrupt or Kernel context, no mailbox commands allowed.
1838  */
1839 /* ARGSUSED */
1840 static int
1841 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1842     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1843 {
1844 	ql_srb_t		*sp = NULL;
1845 	uint16_t		comp_status;
1846 	uint32_t		index, resp_identifier;
1847 	int			rval = 0;
1848 
1849 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1850 
1851 	/* Validate the response entry handle. */
1852 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1853 	index = resp_identifier & OSC_INDEX_MASK;
1854 	if (index < MAX_OUTSTANDING_COMMANDS) {
1855 		/* the index seems reasonable */
1856 		sp = ha->outstanding_cmds[index];
1857 		if (sp != NULL) {
1858 			if (sp->handle == resp_identifier) {
1859 				/* Neo, you're the one... */
1860 				ha->outstanding_cmds[index] = NULL;
1861 				sp->handle = 0;
1862 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1863 			} else {
1864 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1865 				    resp_identifier, sp->handle);
1866 				sp = NULL;
1867 				ql_signal_abort(ha, set_flags);
1868 			}
1869 		} else {
1870 			sp = ql_verify_preprocessed_cmd(ha,
1871 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1872 		}
1873 	} else {
1874 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1875 		    index, resp_identifier);
1876 		ql_signal_abort(ha, set_flags);
1877 	}
1878 
1879 	if (sp != NULL) {
1880 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1881 		    &pkt->comp_status);
1882 
		/* We don't care about SCSI QFULLs. */
1884 		if (comp_status == CS_QUEUE_FULL) {
1885 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1886 			    sp->lun_queue->target_queue->d_id.b24,
1887 			    sp->lun_queue->lun_no);
1888 			comp_status = CS_COMPLETE;
1889 		}
1890 
1891 		/*
1892 		 * 2300 firmware marks completion status as data underrun
1893 		 * for scsi qfulls. Make it transport complete.
1894 		 */
1895 		if ((comp_status == CS_DATA_UNDERRUN) &&
1896 		    (pkt->scsi_status_l != 0)) {
1897 			comp_status = CS_COMPLETE;
1898 		}
1899 
1900 		/*
1901 		 * Workaround T3 issue where we do not get any data xferred
1902 		 * but get back a good status.
1903 		 */
1904 		if (comp_status == CS_COMPLETE &&
1905 		    pkt->scsi_status_l == 0 &&
1906 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1907 		    pkt->residual_length != 0 &&
1908 		    sp->fcp &&
1909 		    sp->fcp->fcp_data_len != 0 &&
1910 		    sp->fcp->fcp_cntl.cntl_write_data) {
1911 			comp_status = CS_ABORTED;
1912 		}
1913 
1914 		/*
1915 		 * Fast path to good SCSI I/O completion
1916 		 */
		if ((comp_status == CS_COMPLETE) &&
		    (!pkt->scsi_status_l) &&
		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1920 			/* Set completed status. */
1921 			sp->flags |= SRB_ISP_COMPLETED;
1922 			sp->pkt->pkt_reason = comp_status;
1923 			ql_fast_fcp_post(sp);
1924 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1925 			    ha->instance);
1926 			return (0);
1927 		}
1928 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1929 		    set_flags, reset_flags);
1930 	}
1931 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1932 
1933 	return (rval);
1934 }
1935 
1936 /*
1937  * ql_verify_preprocessed_cmd
 *	Handles preprocessed commands.
1939  *
1940  * Input:
1941  *	ha:		adapter state pointer.
1942  *	pkt_handle:	handle pointer.
1943  *	set_flags:	task daemon flags to set.
1944  *	reset_flags:	task daemon flags to reset.
1945  *
1946  * Returns:
1947  *	srb pointer or NULL
1948  *
1949  * Context:
1950  *	Interrupt or Kernel context, no mailbox commands allowed.
1951  */
1952 /* ARGSUSED */
1953 ql_srb_t *
1954 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
1955     uint32_t *set_flags, uint32_t *reset_flags)
1956 {
1957 	ql_srb_t		*sp = NULL;
1958 	uint32_t		index, resp_identifier;
1959 	uint32_t		get_handle = 10;
1960 
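	/*
	 * Poll up to 10 times, 10ms apart, for the IOCB handle to appear;
	 * just before the final retry the whole response queue DMA buffer
	 * is re-synced as a last resort.
	 */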
1961 	while (get_handle) {
1962 		/* Get handle. */
1963 		resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
1964 		index = resp_identifier & OSC_INDEX_MASK;
1965 		/* Validate handle. */
1966 		if (index < MAX_OUTSTANDING_COMMANDS) {
1967 			sp = ha->outstanding_cmds[index];
1968 		}
1969 
1970 		if (sp != NULL) {
1971 			EL(ha, "sp=%x, resp_id=%x, get=%d\n", sp,
1972 			    resp_identifier, get_handle);
1973 			break;
1974 		} else {
1975 			get_handle -= 1;
1976 			drv_usecwait(10000);
1977 			if (get_handle == 1) {
1978 				/* Last chance, Sync whole DMA buffer. */
1979 				(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1980 				    RESPONSE_Q_BUFFER_OFFSET,
1981 				    RESPONSE_QUEUE_SIZE,
1982 				    DDI_DMA_SYNC_FORKERNEL);
1983 				EL(ha, "last chance DMA sync\n");
1984 			}
1985 		}
1986 	}
1987 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1988 
1989 	return (sp);
1990 }
1991 
1992 
1993 /*
1994  * ql_status_error
1995  *	Processes received ISP status entry error.
1996  *
1997  * Input:
1998  *	ha:		adapter state pointer.
1999  *	sp:		SRB pointer.
2000  *	pkt:		entry pointer.
2001  *	done_q:		done queue pointer.
2002  *	set_flags:	task daemon flags to set.
2003  *	reset_flags:	task daemon flags to reset.
2004  *
2005  * Returns:
2006  *	BIT_0 = CS_RESET status received.
2007  *
2008  * Context:
2009  *	Interrupt or Kernel context, no mailbox commands allowed.
2010  */
2011 /* ARGSUSED */
2012 static int
2013 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
2014     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2015 {
2016 	uint32_t		sense_sz = 0;
2017 	uint32_t		cnt;
2018 	ql_tgt_t		*tq;
2019 	fcp_rsp_t		*fcpr;
2020 	struct fcp_rsp_info	*rsp;
2021 	int			rval = 0;
2022 
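	/*
	 * Normalized view of the status IOCB, filled in from either the
	 * ISP24xx or the ISP2xxx entry layout so the rest of the routine
	 * is chip-independent.
	 */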
2023 	struct {
2024 		uint8_t		*rsp_info;
2025 		uint8_t		*req_sense_data;
2026 		uint32_t	residual_length;
2027 		uint32_t	fcp_residual_length;
2028 		uint32_t	rsp_info_length;
2029 		uint32_t	req_sense_length;
2030 		uint16_t	comp_status;
2031 		uint8_t		state_flags_l;
2032 		uint8_t		state_flags_h;
2033 		uint8_t		scsi_status_l;
2034 		uint8_t		scsi_status_h;
2035 	} sts;
2036 
2037 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2038 
2039 	if (CFG_IST(ha, CFG_CTRL_242581)) {
2040 		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
2041 
2042 		/* Setup status. */
2043 		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2044 		    &pkt24->comp_status);
2045 		sts.scsi_status_l = pkt24->scsi_status_l;
2046 		sts.scsi_status_h = pkt24->scsi_status_h;
2047 
2048 		/* Setup firmware residuals. */
2049 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2050 		    ddi_get32(ha->hba_buf.acc_handle,
2051 		    (uint32_t *)&pkt24->residual_length) : 0;
2052 
2053 		/* Setup FCP residuals. */
2054 		sts.fcp_residual_length = sts.scsi_status_h &
2055 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2056 		    ddi_get32(ha->hba_buf.acc_handle,
2057 		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2058 
2059 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2060 		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
2061 		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2062 
2063 			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2064 			    sts.residual_length,
2065 			    pkt24->fcp_rsp_residual_count);
2066 			sts.scsi_status_h = (uint8_t)
2067 			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
2068 		}
2069 
2070 		/* Setup state flags. */
2071 		sts.state_flags_l = pkt24->state_flags_l;
2072 		sts.state_flags_h = pkt24->state_flags_h;
2073 
2074 		if (sp->fcp->fcp_data_len &&
2075 		    (sts.comp_status != CS_DATA_UNDERRUN ||
2076 		    sts.residual_length != sp->fcp->fcp_data_len)) {
2077 			sts.state_flags_h = (uint8_t)
2078 			    (sts.state_flags_h | SF_GOT_BUS |
2079 			    SF_GOT_TARGET | SF_SENT_CMD |
2080 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2081 		} else {
2082 			sts.state_flags_h = (uint8_t)
2083 			    (sts.state_flags_h | SF_GOT_BUS |
2084 			    SF_GOT_TARGET | SF_SENT_CMD |
2085 			    SF_GOT_STATUS);
2086 		}
2087 		if (sp->fcp->fcp_cntl.cntl_write_data) {
2088 			sts.state_flags_l = (uint8_t)
2089 			    (sts.state_flags_l | SF_DATA_OUT);
2090 		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
2091 			sts.state_flags_l = (uint8_t)
2092 			    (sts.state_flags_l | SF_DATA_IN);
2093 		}
2094 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2095 			sts.state_flags_l = (uint8_t)
2096 			    (sts.state_flags_l | SF_HEAD_OF_Q);
2097 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2098 			sts.state_flags_l = (uint8_t)
2099 			    (sts.state_flags_l | SF_ORDERED_Q);
2100 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2101 			sts.state_flags_l = (uint8_t)
2102 			    (sts.state_flags_l | SF_SIMPLE_Q);
2103 		}
2104 
2105 		/* Setup FCP response info. */
2106 		sts.rsp_info = &pkt24->rsp_sense_data[0];
2107 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2108 			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2109 			    (uint32_t *)&pkt24->fcp_rsp_data_length);
2110 			if (sts.rsp_info_length >
2111 			    sizeof (struct fcp_rsp_info)) {
2112 				sts.rsp_info_length =
2113 				    sizeof (struct fcp_rsp_info);
2114 			}
2115 			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2116 				ql_chg_endian(sts.rsp_info + cnt, 4);
2117 			}
2118 		} else {
2119 			sts.rsp_info_length = 0;
2120 		}
2121 
2122 		/* Setup sense data. */
2123 		sts.req_sense_data =
2124 		    &pkt24->rsp_sense_data[sts.rsp_info_length];
2125 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2126 			sts.req_sense_length =
2127 			    ddi_get32(ha->hba_buf.acc_handle,
2128 			    (uint32_t *)&pkt24->fcp_sense_length);
2129 			sts.state_flags_h = (uint8_t)
2130 			    (sts.state_flags_h | SF_ARQ_DONE);
2131 			sense_sz = (uint32_t)
2132 			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2133 			    (uintptr_t)sts.req_sense_data);
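			/*
			 * Swap only the sense bytes that fit in this
			 * entry; any remainder arrives in status
			 * continuation entries.
			 */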
2134 			for (cnt = 0; cnt < sense_sz; cnt += 4) {
2135 				ql_chg_endian(sts.req_sense_data + cnt, 4);
2136 			}
2137 		} else {
2138 			sts.req_sense_length = 0;
2139 		}
2140 	} else {
2141 		/* Setup status. */
2142 		sts.comp_status = (uint16_t)ddi_get16(
2143 		    ha->hba_buf.acc_handle, &pkt23->comp_status);
2144 		sts.scsi_status_l = pkt23->scsi_status_l;
2145 		sts.scsi_status_h = pkt23->scsi_status_h;
2146 
2147 		/* Setup firmware residuals. */
2148 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2149 		    ddi_get32(ha->hba_buf.acc_handle,
2150 		    (uint32_t *)&pkt23->residual_length) : 0;
2151 
2152 		/* Setup FCP residuals. */
2153 		sts.fcp_residual_length = sts.scsi_status_h &
2154 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2155 		    sts.residual_length : 0;
2156 
2157 		/* Setup state flags. */
2158 		sts.state_flags_l = pkt23->state_flags_l;
2159 		sts.state_flags_h = pkt23->state_flags_h;
2160 
2161 		/* Setup FCP response info. */
2162 		sts.rsp_info = &pkt23->rsp_info[0];
2163 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2164 			sts.rsp_info_length = ddi_get16(
2165 			    ha->hba_buf.acc_handle,
2166 			    (uint16_t *)&pkt23->rsp_info_length);
2167 			if (sts.rsp_info_length >
2168 			    sizeof (struct fcp_rsp_info)) {
2169 				sts.rsp_info_length =
2170 				    sizeof (struct fcp_rsp_info);
2171 			}
2172 		} else {
2173 			sts.rsp_info_length = 0;
2174 		}
2175 
2176 		/* Setup sense data. */
2177 		sts.req_sense_data = &pkt23->req_sense_data[0];
2178 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2179 		    ddi_get16(ha->hba_buf.acc_handle,
2180 		    (uint16_t *)&pkt23->req_sense_length) : 0;
2181 	}
2182 
2183 	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2184 
2185 	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2186 	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2187 	    sizeof (fcp_rsp_t));
2188 
2189 	tq = sp->lun_queue->target_queue;
2190 
2191 	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2192 	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2193 		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2194 	}
2195 	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2196 		fcpr->fcp_u.fcp_status.sense_len_set = 1;
2197 	}
2198 	if (sts.scsi_status_h & FCP_RESID_OVER) {
2199 		fcpr->fcp_u.fcp_status.resid_over = 1;
2200 	}
2201 	if (sts.scsi_status_h & FCP_RESID_UNDER) {
2202 		fcpr->fcp_u.fcp_status.resid_under = 1;
2203 	}
2204 	fcpr->fcp_u.fcp_status.reserved_1 = 0;
2205 
2206 	/* Set ISP completion status */
2207 	sp->pkt->pkt_reason = sts.comp_status;
2208 
	/* Copy FCP response information and update statistics. */
2210 	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2211 	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2212 
2213 		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2214 		if (sense_sz > sts.rsp_info_length) {
2215 			sense_sz = sts.rsp_info_length;
2216 		}
2217 
2218 		/* copy response information data. */
2219 		if (sense_sz) {
2220 			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2221 			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2222 		}
2223 		fcpr->fcp_response_len = sense_sz;
2224 
2225 		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2226 		    fcpr->fcp_response_len);
2227 
2228 		switch (*(sts.rsp_info + 3)) {
2229 		case FCP_NO_FAILURE:
2230 			break;
2231 		case FCP_DL_LEN_MISMATCH:
2232 			ha->adapter_stats->d_stats[lobyte(
2233 			    tq->loop_id)].dl_len_mismatches++;
2234 			break;
2235 		case FCP_CMND_INVALID:
2236 			break;
2237 		case FCP_DATA_RO_MISMATCH:
2238 			ha->adapter_stats->d_stats[lobyte(
2239 			    tq->loop_id)].data_ro_mismatches++;
2240 			break;
2241 		case FCP_TASK_MGMT_NOT_SUPPTD:
2242 			break;
2243 		case FCP_TASK_MGMT_FAILED:
2244 			ha->adapter_stats->d_stats[lobyte(
2245 			    tq->loop_id)].task_mgmt_failures++;
2246 			break;
2247 		default:
2248 			break;
2249 		}
2250 	} else {
2251 		/*
2252 		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2253 		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
2254 		 */
2255 		fcpr->fcp_response_len = 0;
2256 	}
2257 
2258 	/* Set reset status received. */
2259 	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2260 		rval |= BIT_0;
2261 	}
2262 
2263 	if (!(tq->flags & TQF_TAPE_DEVICE) &&
2264 	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2265 	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2266 	    ha->task_daemon_flags & LOOP_DOWN) {
2267 		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2268 		    tq->d_id.b24, sp->lun_queue->lun_no);
2269 
2270 		/* Set retry status. */
2271 		sp->flags |= SRB_RETRY;
2272 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2273 	    tq->port_down_retry_count != 0 &&
2274 	    (sts.comp_status == CS_INCOMPLETE ||
2275 	    sts.comp_status == CS_PORT_UNAVAILABLE ||
2276 	    sts.comp_status == CS_PORT_LOGGED_OUT ||
2277 	    sts.comp_status == CS_PORT_CONFIG_CHG ||
2278 	    sts.comp_status == CS_PORT_BUSY)) {
2279 		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2280 		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2281 		    tq->port_down_retry_count);
2282 
2283 		/* Set retry status. */
2284 		sp->flags |= SRB_RETRY;
2285 
2286 		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2287 			/* Acquire device queue lock. */
2288 			DEVICE_QUEUE_LOCK(tq);
2289 
2290 			tq->flags |= TQF_QUEUE_SUSPENDED;
2291 
2292 			/* Decrement port down count. */
2293 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2294 				tq->port_down_retry_count--;
2295 			}
2296 
2297 			DEVICE_QUEUE_UNLOCK(tq);
2298 
2299 			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2300 			    == 0 &&
2301 			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2302 			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2303 				sp->ha->adapter_stats->d_stats[lobyte(
2304 				    tq->loop_id)].logouts_recvd++;
2305 				ql_send_logo(sp->ha, tq, done_q);
2306 			}
2307 
2308 			ADAPTER_STATE_LOCK(ha);
2309 			if (ha->port_retry_timer == 0) {
2310 				if ((ha->port_retry_timer =
2311 				    ha->port_down_retry_delay) == 0) {
2312 					*set_flags |=
2313 					    PORT_RETRY_NEEDED;
2314 				}
2315 			}
2316 			ADAPTER_STATE_UNLOCK(ha);
2317 		}
2318 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2319 	    (sts.comp_status == CS_RESET ||
2320 	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2321 	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2322 		if (sts.comp_status == CS_RESET) {
2323 			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2324 			    tq->d_id.b24, sp->lun_queue->lun_no);
2325 		} else if (sts.comp_status == CS_QUEUE_FULL) {
2326 			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2327 			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2328 			    tq->qfull_retry_count);
2329 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2330 				tq->flags |= TQF_QUEUE_SUSPENDED;
2331 
2332 				tq->qfull_retry_count--;
2333 
2334 				ADAPTER_STATE_LOCK(ha);
2335 				if (ha->port_retry_timer == 0) {
2336 					if ((ha->port_retry_timer =
2337 					    ha->qfull_retry_delay) ==
2338 					    0) {
2339 						*set_flags |=
2340 						    PORT_RETRY_NEEDED;
2341 					}
2342 				}
2343 				ADAPTER_STATE_UNLOCK(ha);
2344 			}
2345 		} else {
2346 			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2347 			    tq->d_id.b24, sp->lun_queue->lun_no);
2348 		}
2349 
2350 		/* Set retry status. */
2351 		sp->flags |= SRB_RETRY;
2352 	} else {
2353 		fcpr->fcp_resid =
2354 		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2355 		    sp->fcp->fcp_data_len : sts.fcp_residual_length;
2356 
2357 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2358 		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2359 
2360 			if (sts.scsi_status_l == STATUS_CHECK) {
2361 				sp->pkt->pkt_reason = CS_COMPLETE;
2362 			} else {
2363 				EL(ha, "transport error - "
2364 				    "underrun & invalid resid\n");
2365 				EL(ha, "ssh=%xh, ssl=%xh\n",
2366 				    sts.scsi_status_h, sts.scsi_status_l);
2367 				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2368 			}
2369 		}
2370 
2371 		/* Ignore firmware underrun error. */
2372 		if (sts.comp_status == CS_DATA_UNDERRUN &&
2373 		    (sts.scsi_status_h & FCP_RESID_UNDER ||
2374 		    (sts.scsi_status_l != STATUS_CHECK &&
2375 		    sts.scsi_status_l != STATUS_GOOD))) {
2376 			sp->pkt->pkt_reason = CS_COMPLETE;
2377 		}
2378 
2379 		if (sp->pkt->pkt_reason != CS_COMPLETE) {
2380 			ha->xioctl->DeviceErrorCount++;
2381 			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2382 			    "\n", sts.comp_status, tq->d_id.b24,
2383 			    sp->lun_queue->lun_no);
2384 		}
2385 
2386 		/* Set target request sense data. */
2387 		if (sts.scsi_status_l == STATUS_CHECK) {
2388 			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2389 
2390 				if (sp->pkt->pkt_reason == CS_COMPLETE &&
2391 				    sts.req_sense_data[2] != KEY_NO_SENSE &&
2392 				    sts.req_sense_data[2] !=
2393 				    KEY_UNIT_ATTENTION) {
2394 					ha->xioctl->DeviceErrorCount++;
2395 				}
2396 
2397 				sense_sz = sts.req_sense_length;
2398 
				/* Ensure data does not exceed buffer. */
2400 				if (sp->pkt->pkt_rsplen <=
2401 				    (uint32_t)sizeof (fcp_rsp_t) +
2402 				    fcpr->fcp_response_len) {
2403 					sp->request_sense_length = 0;
2404 				} else {
2405 					sp->request_sense_length = (uint32_t)
2406 					    (sp->pkt->pkt_rsplen -
2407 					    sizeof (fcp_rsp_t) -
2408 					    fcpr->fcp_response_len);
2409 				}
2410 
2411 				if (sense_sz <
2412 				    sp->request_sense_length) {
2413 					sp->request_sense_length =
2414 					    sense_sz;
2415 				}
2416 
2417 				sp->request_sense_ptr = (caddr_t)rsp;
2418 
2419 				sense_sz = (uint32_t)
2420 				    (((uintptr_t)pkt23 +
2421 				    sizeof (sts_entry_t)) -
2422 				    (uintptr_t)sts.req_sense_data);
2423 				if (sp->request_sense_length <
2424 				    sense_sz) {
2425 					sense_sz =
2426 					    sp->request_sense_length;
2427 				}
2428 
2429 				fcpr->fcp_sense_len = sense_sz;
2430 
2431 				/* Move sense data. */
2432 				ddi_rep_get8(ha->hba_buf.acc_handle,
2433 				    (uint8_t *)sp->request_sense_ptr,
2434 				    sts.req_sense_data,
2435 				    (size_t)sense_sz,
2436 				    DDI_DEV_AUTOINCR);
2437 
2438 				sp->request_sense_ptr += sense_sz;
2439 				sp->request_sense_length -= sense_sz;
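				/*
				 * If sense data remains beyond this
				 * entry, park the SRB so status
				 * continuation entries can finish it.
				 */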
2440 				if (sp->request_sense_length != 0) {
2441 					ha->status_srb = sp;
2442 				}
2443 			}
2444 
2445 			if (sense_sz != 0) {
2446 				EL(sp->ha, "check condition sense data, "
2447 				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2448 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2449 				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2450 				    sp->lun_queue->lun_no,
2451 				    sts.req_sense_data[0],
2452 				    sts.req_sense_data[1],
2453 				    sts.req_sense_data[2],
2454 				    sts.req_sense_data[3],
2455 				    sts.req_sense_data[4],
2456 				    sts.req_sense_data[5],
2457 				    sts.req_sense_data[6],
2458 				    sts.req_sense_data[7],
2459 				    sts.req_sense_data[8],
2460 				    sts.req_sense_data[9],
2461 				    sts.req_sense_data[10],
2462 				    sts.req_sense_data[11],
2463 				    sts.req_sense_data[12],
2464 				    sts.req_sense_data[13],
2465 				    sts.req_sense_data[14],
2466 				    sts.req_sense_data[15],
2467 				    sts.req_sense_data[16],
2468 				    sts.req_sense_data[17]);
2469 			} else {
2470 				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2471 				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2472 			}
2473 		}
2474 	}
2475 
2476 	/* Set completed status. */
2477 	sp->flags |= SRB_ISP_COMPLETED;
2478 
	/*
	 * Place command on done queue unless sense continuation data is
	 * still pending.
	 */
2480 	if (ha->status_srb == NULL) {
2481 		ql_add_link_b(done_q, &sp->cmd);
2482 	}
2483 
2484 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2485 
2486 	return (rval);
2487 }
2488 
2489 /*
2490  * ql_status_cont_entry
2491  *	Processes status continuation entry.
2492  *
2493  * Input:
2494  *	ha:		adapter state pointer.
2495  *	pkt:		entry pointer.
2496  *	done_q:		done queue pointer.
2497  *	set_flags:	task daemon flags to set.
2498  *	reset_flags:	task daemon flags to reset.
2499  *
2500  * Context:
2501  *	Interrupt or Kernel context, no mailbox commands allowed.
2502  */
2503 /* ARGSUSED */
2504 static void
2505 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2506     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2507 {
2508 	uint32_t	sense_sz, index;
2509 	ql_srb_t	*sp = ha->status_srb;
2510 
2511 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2512 
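	/*
	 * ha->status_srb was left set by ql_status_error() when the sense
	 * data did not fit in the original status entry.
	 */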
2513 	if (sp != NULL && sp->request_sense_length) {
2514 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2515 			sense_sz = sizeof (pkt->req_sense_data);
2516 		} else {
2517 			sense_sz = sp->request_sense_length;
2518 		}
2519 
2520 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2521 			for (index = 0; index < sense_sz; index += 4) {
2522 				ql_chg_endian((uint8_t *)
2523 				    &pkt->req_sense_data[0] + index, 4);
2524 			}
2525 		}
2526 
2527 		/* Move sense data. */
2528 		ddi_rep_get8(ha->hba_buf.acc_handle,
2529 		    (uint8_t *)sp->request_sense_ptr,
2530 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2531 		    DDI_DEV_AUTOINCR);
2532 
2533 		sp->request_sense_ptr += sense_sz;
2534 		sp->request_sense_length -= sense_sz;
2535 
2536 		/* Place command on done queue. */
2537 		if (sp->request_sense_length == 0) {
2538 			ql_add_link_b(done_q, &sp->cmd);
2539 			ha->status_srb = NULL;
2540 		}
2541 	}
2542 
2543 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2544 }
2545 
2546 /*
2547  * ql_ip_entry
2548  *	Processes received ISP IP entry.
2549  *
2550  * Input:
2551  *	ha:		adapter state pointer.
2552  *	pkt:		entry pointer.
2553  *	done_q:		done queue pointer.
2554  *	set_flags:	task daemon flags to set.
2555  *	reset_flags:	task daemon flags to reset.
2556  *
2557  * Context:
2558  *	Interrupt or Kernel context, no mailbox commands allowed.
2559  */
2560 /* ARGSUSED */
2561 static void
2562 ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
2563     uint32_t *set_flags, uint32_t *reset_flags)
2564 {
2565 	ql_srb_t	*sp;
2566 	uint32_t	index, resp_identifier;
2567 	ql_tgt_t	*tq;
2568 
2569 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2570 
2571 	/* Validate the response entry handle. */
2572 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2573 	index = resp_identifier & OSC_INDEX_MASK;
2574 	if (index < MAX_OUTSTANDING_COMMANDS) {
2575 		/* the index seems reasonable */
2576 		sp = ha->outstanding_cmds[index];
2577 		if (sp != NULL) {
2578 			if (sp->handle == resp_identifier) {
2579 				/* Neo, you're the one... */
2580 				ha->outstanding_cmds[index] = NULL;
2581 				sp->handle = 0;
2582 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2583 			} else {
2584 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2585 				    resp_identifier, sp->handle);
2586 				sp = NULL;
2587 				ql_signal_abort(ha, set_flags);
2588 			}
2589 		} else {
2590 			sp = ql_verify_preprocessed_cmd(ha,
2591 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2592 		}
2593 	} else {
2594 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2595 		    index, resp_identifier);
2596 		ql_signal_abort(ha, set_flags);
2597 	}
2598 
2599 	if (sp != NULL) {
2600 		tq = sp->lun_queue->target_queue;
2601 
2602 		/* Set ISP completion status */
2603 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2604 			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;
2605 
2606 			sp->pkt->pkt_reason = ddi_get16(
2607 			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
2608 		} else {
2609 			sp->pkt->pkt_reason = ddi_get16(
2610 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2611 		}
2612 
2613 		if (ha->task_daemon_flags & LOOP_DOWN) {
2614 			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2615 			    tq->d_id.b24);
2616 
2617 			/* Set retry status. */
2618 			sp->flags |= SRB_RETRY;
2619 
2620 		} else if (tq->port_down_retry_count &&
2621 		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2622 		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2623 		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2624 		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2625 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2626 			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2627 			    sp->pkt->pkt_reason, tq->d_id.b24,
2628 			    tq->port_down_retry_count);
2629 
2630 			/* Set retry status. */
2631 			sp->flags |= SRB_RETRY;
2632 
2633 			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2634 			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
2635 				ha->adapter_stats->d_stats[lobyte(
2636 				    tq->loop_id)].logouts_recvd++;
2637 				ql_send_logo(ha, tq, done_q);
2638 			}
2639 
2640 			/* Acquire device queue lock. */
2641 			DEVICE_QUEUE_LOCK(tq);
2642 
2643 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2644 				tq->flags |= TQF_QUEUE_SUSPENDED;
2645 
2646 				tq->port_down_retry_count--;
2647 
2648 				ADAPTER_STATE_LOCK(ha);
2649 				if (ha->port_retry_timer == 0) {
2650 					if ((ha->port_retry_timer =
2651 					    ha->port_down_retry_delay) == 0) {
2652 						*set_flags |=
2653 						    PORT_RETRY_NEEDED;
2654 					}
2655 				}
2656 				ADAPTER_STATE_UNLOCK(ha);
2657 			}
2658 
2659 			/* Release device queue specific lock. */
2660 			DEVICE_QUEUE_UNLOCK(tq);
2661 
2662 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2663 			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2664 
2665 			/* Set retry status. */
2666 			sp->flags |= SRB_RETRY;
2667 		} else {
2668 			if (sp->pkt->pkt_reason != CS_COMPLETE) {
2669 				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2670 				    sp->pkt->pkt_reason, tq->d_id.b24);
2671 			}
2672 		}
2673 
2674 		/* Set completed status. */
2675 		sp->flags |= SRB_ISP_COMPLETED;
2676 
2677 		ql_add_link_b(done_q, &sp->cmd);
2678 
2679 	}
2680 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2681 }
2682 
2683 /*
2684  * ql_ip_rcv_entry
2685  *	Processes received ISP IP buffers entry.
2686  *
2687  * Input:
2688  *	ha:		adapter state pointer.
2689  *	pkt:		entry pointer.
2690  *	done_q:		done queue pointer.
2691  *	set_flags:	task daemon flags to set.
2692  *	reset_flags:	task daemon flags to reset.
2693  *
2694  * Context:
2695  *	Interrupt or Kernel context, no mailbox commands allowed.
2696  */
2697 /* ARGSUSED */
2698 static void
2699 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2700     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2701 {
2702 	port_id_t	s_id;
2703 	uint16_t	index;
2704 	uint8_t		cnt;
2705 	ql_tgt_t	*tq;
2706 
2707 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2708 
2709 	/* Locate device queue. */
2710 	s_id.b.al_pa = pkt->s_id[0];
2711 	s_id.b.area = pkt->s_id[1];
2712 	s_id.b.domain = pkt->s_id[2];
2713 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2714 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2715 		return;
2716 	}
2717 
2718 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2719 	    &pkt->seq_length);
2720 	tq->ub_total_seg_cnt = pkt->segment_count;
2721 	tq->ub_seq_id = ++ha->ub_seq_id;
2722 	tq->ub_seq_cnt = 0;
2723 	tq->ub_frame_ro = 0;
2724 	tq->ub_loop_id = pkt->loop_id;
2725 	ha->rcv_dev_q = tq;
2726 
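	/*
	 * Hand each firmware receive buffer handle to ql_ub_frame_hdr(),
	 * which queues the corresponding unsolicited buffer on done_q.
	 */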
2727 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2728 	    tq->ub_total_seg_cnt; cnt++) {
2729 
2730 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2731 		    &pkt->buffer_handle[cnt]);
2732 
2733 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2734 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2735 			*set_flags |= ISP_ABORT_NEEDED;
2736 			break;
2737 		}
2738 	}
2739 
2740 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2741 }
2742 
2743 /*
2744  * ql_ip_rcv_cont_entry
2745  *	Processes received ISP IP buffers continuation entry.
2746  *
2747  * Input:
2748  *	ha:		adapter state pointer.
2749  *	pkt:		entry pointer.
2750  *	done_q:		done queue pointer.
2751  *	set_flags:	task daemon flags to set.
2752  *	reset_flags:	task daemon flags to reset.
2753  *
2754  * Context:
2755  *	Interrupt or Kernel context, no mailbox commands allowed.
2756  */
2757 /* ARGSUSED */
2758 static void
2759 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2760     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2761 {
2762 	uint16_t	index;
2763 	uint8_t		cnt;
2764 	ql_tgt_t	*tq;
2765 
2766 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2767 
2768 	if ((tq = ha->rcv_dev_q) == NULL) {
2769 		EL(ha, "No IP receive device\n");
2770 		return;
2771 	}
2772 
2773 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2774 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2775 
2776 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2777 		    &pkt->buffer_handle[cnt]);
2778 
2779 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2780 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2781 			*set_flags |= ISP_ABORT_NEEDED;
2782 			break;
2783 		}
2784 	}
2785 
2786 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2787 }
2788 
2789 /*
 * ql_ip_24xx_rcv_entry
2791  *	Processes received ISP24xx IP buffers entry.
2792  *
2793  * Input:
2794  *	ha:		adapter state pointer.
2795  *	pkt:		entry pointer.
2796  *	done_q:		done queue pointer.
2797  *	set_flags:	task daemon flags to set.
2798  *	reset_flags:	task daemon flags to reset.
2799  *
2800  * Context:
2801  *	Interrupt or Kernel context, no mailbox commands allowed.
2802  */
2803 /* ARGSUSED */
2804 static void
2805 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2806     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2807 {
2808 	port_id_t	s_id;
2809 	uint16_t	index;
2810 	uint8_t		cnt;
2811 	ql_tgt_t	*tq;
2812 
2813 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2814 
2815 	/* Locate device queue. */
2816 	s_id.b.al_pa = pkt->s_id[0];
2817 	s_id.b.area = pkt->s_id[1];
2818 	s_id.b.domain = pkt->s_id[2];
2819 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2820 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2821 		return;
2822 	}
2823 
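	/*
	 * Sequence state is initialized only on the first entry of a
	 * sequence (ub_total_seg_cnt == 0); later entries for the same
	 * sequence just supply additional buffer handles.
	 */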
2824 	if (tq->ub_total_seg_cnt == 0) {
2825 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2826 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2827 		tq->ub_total_seg_cnt = pkt->segment_count;
2828 		tq->ub_seq_id = ++ha->ub_seq_id;
2829 		tq->ub_seq_cnt = 0;
2830 		tq->ub_frame_ro = 0;
2831 		tq->ub_loop_id = (uint16_t)ddi_get16(
2832 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2833 	}
2834 
2835 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2836 	    tq->ub_total_seg_cnt; cnt++) {
2837 
2838 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2839 		    &pkt->buffer_handle[cnt]);
2840 
2841 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2842 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2843 			*set_flags |= ISP_ABORT_NEEDED;
2844 			break;
2845 		}
2846 	}
2847 
2848 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2849 }
2850 
2851 /*
2852  * ql_ms_entry
2853  *	Processes received Name/Management/CT Pass-Through entry.
2854  *
2855  * Input:
2856  *	ha:		adapter state pointer.
2857  *	pkt23:		entry pointer.
2858  *	done_q:		done queue pointer.
2859  *	set_flags:	task daemon flags to set.
2860  *	reset_flags:	task daemon flags to reset.
2861  *
2862  * Context:
2863  *	Interrupt or Kernel context, no mailbox commands allowed.
2864  */
2865 /* ARGSUSED */
2866 static void
2867 ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
2868     uint32_t *set_flags, uint32_t *reset_flags)
2869 {
2870 	ql_srb_t		*sp;
2871 	uint32_t		index, cnt, resp_identifier;
2872 	ql_tgt_t		*tq;
2873 	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;
2874 
2875 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2876 
2877 	/* Validate the response entry handle. */
2878 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2879 	index = resp_identifier & OSC_INDEX_MASK;
2880 	if (index < MAX_OUTSTANDING_COMMANDS) {
2881 		/* the index seems reasonable */
2882 		sp = ha->outstanding_cmds[index];
2883 		if (sp != NULL) {
2884 			if (sp->handle == resp_identifier) {
2885 				/* Neo, you're the one... */
2886 				ha->outstanding_cmds[index] = NULL;
2887 				sp->handle = 0;
2888 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2889 			} else {
2890 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
2891 				    resp_identifier, sp->handle);
2892 				sp = NULL;
2893 				ql_signal_abort(ha, set_flags);
2894 			}
2895 		} else {
2896 			sp = ql_verify_preprocessed_cmd(ha,
2897 			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
2898 		}
2899 	} else {
2900 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
2901 		    index, resp_identifier);
2902 		ql_signal_abort(ha, set_flags);
2903 	}
2904 
2905 	if (sp != NULL) {
2906 		if (!(sp->flags & SRB_MS_PKT)) {
2907 			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
2908 			    sp->flags);
2909 			*set_flags |= ISP_ABORT_NEEDED;
2910 			return;
2911 		}
2912 
2913 		tq = sp->lun_queue->target_queue;
2914 
2915 		/* Set ISP completion status */
2916 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2917 			sp->pkt->pkt_reason = ddi_get16(
2918 			    ha->hba_buf.acc_handle, &pkt24->status);
2919 		} else {
2920 			sp->pkt->pkt_reason = ddi_get16(
2921 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2922 		}
2923 
2924 		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
2925 		    sp->retry_count) {
2926 			EL(ha, "Resouce Unavailable Retry = %d\n",
2927 			    sp->retry_count);
2928 
2929 			/* Set retry status. */
2930 			sp->retry_count--;
2931 			sp->flags |= SRB_RETRY;
2932 
2933 			/* Acquire device queue lock. */
2934 			DEVICE_QUEUE_LOCK(tq);
2935 
2936 			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
2937 				tq->flags |= TQF_QUEUE_SUSPENDED;
2938 
2939 				ADAPTER_STATE_LOCK(ha);
2940 				if (ha->port_retry_timer == 0) {
2941 					ha->port_retry_timer = 2;
2942 				}
2943 				ADAPTER_STATE_UNLOCK(ha);
2944 			}
2945 
2946 			/* Release device queue specific lock. */
2947 			DEVICE_QUEUE_UNLOCK(tq);
2948 
2949 		} else if (tq->port_down_retry_count &&
2950 		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2951 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2952 			EL(ha, "Port Down Retry\n");
2953 
2954 			/* Set retry status. */
2955 			sp->flags |= SRB_RETRY;
2956 
2957 			/* Acquire device queue lock. */
2958 			DEVICE_QUEUE_LOCK(tq);
2959 
2960 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2961 				tq->flags |= TQF_QUEUE_SUSPENDED;
2962 
2963 				tq->port_down_retry_count--;
2964 
2965 				ADAPTER_STATE_LOCK(ha);
2966 				if (ha->port_retry_timer == 0) {
2967 					if ((ha->port_retry_timer =
2968 					    ha->port_down_retry_delay) == 0) {
2969 						*set_flags |=
2970 						    PORT_RETRY_NEEDED;
2971 					}
2972 				}
2973 				ADAPTER_STATE_UNLOCK(ha);
2974 			}
2975 			/* Release device queue specific lock. */
2976 			DEVICE_QUEUE_UNLOCK(tq);
2977 
2978 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2979 			EL(ha, "Reset Retry\n");
2980 
2981 			/* Set retry status. */
2982 			sp->flags |= SRB_RETRY;
2983 
2984 		} else if (CFG_IST(ha, CFG_CTRL_242581) &&
2985 		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
2986 			cnt = ddi_get32(ha->hba_buf.acc_handle,
2987 			    &pkt24->resp_byte_count);
2988 			if (cnt < sizeof (fc_ct_header_t)) {
2989 				EL(ha, "Data underrun\n");
2990 			} else {
2991 				sp->pkt->pkt_reason = CS_COMPLETE;
2992 			}
2993 
2994 		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
2995 			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
2996 		}
2997 
2998 		if (sp->pkt->pkt_reason == CS_COMPLETE) {
2999 			/*EMPTY*/
3000 			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
3001 			    ha->instance, sp->pkt->pkt_cmd[8],
3002 			    sp->pkt->pkt_cmd[9]);
3003 			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3004 		}
3005 
		/*
		 * For the name server, restore the command CT header;
		 * for the management server, convert the response CT header.
		 */
3007 		if ((sp->flags & SRB_RETRY) == 0) {
3008 			tq->d_id.b24 == 0xfffffc ?
3009 			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3010 			    sp->pkt->pkt_cmd, B_TRUE) :
3011 			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3012 			    sp->pkt->pkt_resp, B_TRUE);
3013 		}
3014 
3015 		/* Set completed status. */
3016 		sp->flags |= SRB_ISP_COMPLETED;
3017 
3018 		/* Place command on done queue. */
3019 		ql_add_link_b(done_q, &sp->cmd);
3020 
3021 	}
3022 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3023 }
3024 
3025 /*
3026  * ql_report_id_entry
 *	Processes received Report ID Acquisition entry.
3028  *
3029  * Input:
3030  *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
3032  *	done_q:		done queue pointer.
3033  *	set_flags:	task daemon flags to set.
3034  *	reset_flags:	task daemon flags to reset.
3035  *
3036  * Context:
3037  *	Interrupt or Kernel context, no mailbox commands allowed.
3038  */
3039 /* ARGSUSED */
3040 static void
3041 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3042     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3043 {
3044 	ql_adapter_state_t	*vha;
3045 
3046 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3047 
3048 	EL(ha, "format=%d, vp=%d, status=%d\n",
3049 	    pkt->format, pkt->vp_index, pkt->status);
3050 
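	/*
	 * A format 1 entry carries a VP index and status; locate the
	 * matching virtual port and schedule a loop resync on success
	 * or port ID change.
	 */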
3051 	if (pkt->format == 1) {
3052 		/* Locate port state structure. */
3053 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3054 			if (vha->vp_index == pkt->vp_index) {
3055 				break;
3056 			}
3057 		}
3058 		if (vha != NULL && vha->vp_index != 0 &&
3059 		    (pkt->status == CS_COMPLETE ||
3060 		    pkt->status == CS_PORT_ID_CHANGE)) {
3061 			*set_flags |= LOOP_RESYNC_NEEDED;
3062 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3063 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3064 			TASK_DAEMON_LOCK(ha);
3065 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3066 			vha->task_daemon_flags &= ~LOOP_DOWN;
3067 			TASK_DAEMON_UNLOCK(ha);
3068 		}
3069 	}
3070 
3071 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3072 }
3073 
3074 /*
 * ql_els_passthru_entry
3076  *	Processes received ELS Pass-Through entry.
3077  *
3078  * Input:
3079  *	ha:		adapter state pointer.
 *	rsp:		entry pointer.
3081  *	done_q:		done queue pointer.
3082  *	set_flags:	task daemon flags to set.
3083  *	reset_flags:	task daemon flags to reset.
3084  *
3085  * Context:
3086  *	Interrupt or Kernel context, no mailbox commands allowed.
3087  */
3088 /* ARGSUSED */
3089 static void
3090 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3091     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3092 {
	ql_tgt_t	*tq = NULL;
3094 	port_id_t	d_id, s_id;
3095 	ql_srb_t	*srb;
3096 	uint32_t	index, resp_identifier;
3097 
3098 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3099 
3100 	/* Validate the response entry handle. */
3101 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3102 	index = resp_identifier & OSC_INDEX_MASK;
3103 	if (index < MAX_OUTSTANDING_COMMANDS) {
3104 		/* the index seems reasonable */
3105 		srb = ha->outstanding_cmds[index];
3106 		if (srb != NULL) {
3107 			if (srb->handle == resp_identifier) {
3108 				/* Neo, you're the one... */
3109 				ha->outstanding_cmds[index] = NULL;
3110 				srb->handle = 0;
3111 				srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3112 			} else {
3113 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3114 				    resp_identifier, srb->handle);
3115 				srb = NULL;
3116 				ql_signal_abort(ha, set_flags);
3117 			}
3118 		} else {
3119 			srb = ql_verify_preprocessed_cmd(ha,
3120 			    (uint32_t *)&rsp->handle, set_flags, reset_flags);
3121 		}
3122 	} else {
3123 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3124 		    index, resp_identifier);
3125 		ql_signal_abort(ha, set_flags);
3126 	}
3127 
3128 	if (srb != NULL) {
3129 		if (!(srb->flags & SRB_ELS_PKT)) {
3130 			EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3131 			    srb->flags);
3132 			*set_flags |= ISP_ABORT_NEEDED;
3133 			return;
3134 		}
3135 
3136 		(void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3137 		    DDI_DMA_SYNC_FORKERNEL);
3138 
3139 		/* Set ISP completion status */
3140 		srb->pkt->pkt_reason = ddi_get16(
3141 		    ha->hba_buf.acc_handle, &rsp->comp_status);
3142 
3143 		if (srb->pkt->pkt_reason != CS_COMPLETE) {
3144 			la_els_rjt_t	rjt;
3145 			EL(ha, "status err=%xh\n", srb->pkt->pkt_reason);
3146 
3147 			if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3148 				EL(ha, "e1=%xh e2=%xh\n",
3149 				    rsp->error_subcode1, rsp->error_subcode2);
3150 			}
3151 
3152 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3153 
3154 			/* Build RJT in the response. */
3155 			rjt.ls_code.ls_code = LA_ELS_RJT;
3156 			rjt.reason = FC_REASON_NO_CONNECTION;
3157 
3158 			ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3159 			    (uint8_t *)srb->pkt->pkt_resp,
3160 			    sizeof (rjt), DDI_DEV_AUTOINCR);
3161 
3162 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3163 			srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3164 		}
3165 
3166 		if (srb->pkt->pkt_reason == CS_COMPLETE) {
3167 			uint8_t		opcode;
3168 			uint16_t	loop_id;
3169 
3170 			/* Indicate ISP completion */
3171 			srb->flags |= SRB_ISP_COMPLETED;
3172 
3173 			loop_id = ddi_get16(ha->hba_buf.acc_handle,
3174 			    &rsp->n_port_hdl);
3175 
3176 			if (ha->topology & QL_N_PORT) {
3177 				/* create a target Q if there isn't one */
3178 				tq = ql_loop_id_to_queue(ha, loop_id);
3179 				if (tq == NULL) {
3180 					d_id.b.al_pa = rsp->d_id_7_0;
3181 					d_id.b.area = rsp->d_id_15_8;
3182 					d_id.b.domain = rsp->d_id_23_16;
3183 					/* Acquire adapter state lock. */
3184 					ADAPTER_STATE_LOCK(ha);
3185 
3186 					tq = ql_dev_init(ha, d_id, loop_id);
3187 					EL(ha, " tq = %x\n", tq);
3188 
3189 					ADAPTER_STATE_UNLOCK(ha);
3190 				}
3191 
3192 				/* on plogi success assume the chosen s_id */
3193 				opcode = ddi_get8(ha->hba_buf.acc_handle,
3194 				    &rsp->els_cmd_opcode);
3195 
3196 				EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n",
3197 				    opcode, srb->pkt);
3198 
3199 				if (opcode == LA_ELS_PLOGI) {
3200 					s_id.b.al_pa = rsp->s_id_7_0;
3201 					s_id.b.area = rsp->s_id_15_8;
3202 					s_id.b.domain = rsp->s_id_23_16;
3203 
3204 					ha->d_id.b24 = s_id.b24;
3205 					EL(ha, "Set port's source ID %xh\n",
3206 					    ha->d_id.b24);
3207 				}
3208 			}
3209 			ql_isp_els_handle_rsp_endian(ha, srb);
3210 
3211 			if (ha != srb->ha) {
3212 				EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3213 			}
3214 
3215 			if (tq != NULL) {
3216 				tq->logout_sent = 0;
3217 				tq->flags &= ~TQF_NEED_AUTHENTICATION;
3218 
3219 				if (CFG_IST(ha, CFG_CTRL_242581)) {
3220 					tq->flags |= TQF_IIDMA_NEEDED;
3221 				}
				srb->pkt->pkt_state = FC_PKT_SUCCESS;
3223 			}
3224 		}
3225 		/* invoke the callback */
3226 		ql_awaken_task_daemon(ha, srb, 0, 0);
3227 	}
3228 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3229 }
3230 
3231 /*
3232  * ql_signal_abort
3233  *	Signal to the task daemon that a condition warranting an
3234  *	isp reset has been detected.
3235  *
3236  * Input:
3237  *	ha:		adapter state pointer.
3238  *	set_flags:	task daemon flags to set.
3239  *
3240  * Context:
3241  *	Interrupt or Kernel context, no mailbox commands allowed.
3242  */
3243 static void
3244 ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags)
3245 {
	if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3247 		*set_flags |= ISP_ABORT_NEEDED;
3248 	}
3249 }
3250