1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2009 QLogic Corporation */
23 
24 /*
25  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
26  * Use is subject to license terms.
27  */
28 
29 #pragma ident	"Copyright 2009 QLogic Corporation; ql_isr.c"
30 
31 /*
32  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
33  *
34  * ***********************************************************************
35  * *									**
36  * *				NOTICE					**
37  * *		COPYRIGHT (C) 1996-2009 QLOGIC CORPORATION		**
38  * *			ALL RIGHTS RESERVED				**
39  * *									**
40  * ***********************************************************************
41  *
42  */
43 
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_iocb.h>
48 #include <ql_isr.h>
49 #include <ql_init.h>
50 #include <ql_mbx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77     uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79     ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83     ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 
91 /*
92  * ql_isr
93  *	Process all INTX intr types.
94  *
95  * Input:
96  *	arg1:	adapter state pointer.
97  *
98  * Returns:
99  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
100  *
101  * Context:
102  *	Interrupt or Kernel context, no mailbox commands allowed.
103  */
104 /* ARGSUSED */
105 uint_t
106 ql_isr(caddr_t arg1)
107 {
108 	return (ql_isr_aif(arg1, 0));
109 }
110 
111 /*
112  * ql_isr_default
113  *	Process unknown/unvectored intr types.
114  *
115  * Input:
116  *	arg1:	adapter state pointer.
117  *	arg2:	interrupt vector.
118  *
119  * Returns:
120  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
121  *
122  * Context:
123  *	Interrupt or Kernel context, no mailbox commands allowed.
124  */
125 /* ARGSUSED */
126 uint_t
127 ql_isr_default(caddr_t arg1, caddr_t arg2)
128 {
129 	ql_adapter_state_t	*ha = (void *)arg1;
130 
131 	EL(ha, "isr_default called: idx=%x\n", arg2);
132 	return (ql_isr_aif(arg1, arg2));
133 }
134 
135 /*
136  * ql_isr_aif
137  *	Process mailbox and I/O command completions.
138  *
139  * Input:
140  *	arg:	adapter state pointer.
141  *	intvec:	interrupt vector.
142  *
143  * Returns:
144  *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
145  *
146  * Context:
147  *	Interrupt or Kernel context, no mailbox commands allowed.
148  */
149 /* ARGSUSED */
150 uint_t
151 ql_isr_aif(caddr_t arg, caddr_t intvec)
152 {
153 	uint16_t		mbx;
154 	uint32_t		stat;
155 	ql_adapter_state_t	*ha = (void *)arg;
156 	uint32_t		set_flags = 0;
157 	uint32_t		reset_flags = 0;
158 	ql_head_t		isr_done_q = {NULL, NULL};
159 	uint_t			rval = DDI_INTR_UNCLAIMED;
160 	int			spurious_intr = 0;
161 	boolean_t		intr = B_FALSE, daemon = B_FALSE;
162 	int			intr_loop = 4;
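	/*
	 * Note: intr_loop bounds how many passes through the interrupt
	 * loop may clear the RISC interrupt "early" inside the individual
	 * completion handlers (the intr_clr argument below); once it
	 * reaches zero the interrupt is cleared only at the bottom of
	 * the loop.
	 */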
163 
164 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
165 
166 	QL_PM_LOCK(ha);
167 	if (ha->power_level != PM_LEVEL_D0) {
168 		/*
169 		 * We are about to power down; exit early.
170 		 */
171 		QL_PM_UNLOCK(ha);
172 		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
173 		return (DDI_INTR_UNCLAIMED);
174 	}
175 	ha->busy++;
176 	QL_PM_UNLOCK(ha);
177 
178 	/* Acquire interrupt lock. */
179 	INTR_LOCK(ha);
180 
181 	if (CFG_IST(ha, CFG_CTRL_2200)) {
182 		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
183 			/* Reset idle timer. */
184 			ha->idle_timer = 0;
185 			rval = DDI_INTR_CLAIMED;
186 			if (intr_loop) {
187 				intr_loop--;
188 			}
189 
190 			/* Special Fast Post 2200. */
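			/*
			 * On the 2200, mailbox register 23 can carry a fast
			 * post completion: the low two bits give the
			 * completion type, and after the shift below the
			 * remaining bits hold the low 12 bits of the IOCB
			 * handle, which index the outstanding command array.
			 */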
191 			stat = 0;
192 			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
193 			    ha->flags & ONLINE) {
194 				ql_srb_t	*sp;
195 
196 				mbx = RD16_IO_REG(ha, mailbox[23]);
197 
198 				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
199 					/* Release mailbox registers. */
200 					WRT16_IO_REG(ha, semaphore, 0);
201 
202 					if (intr_loop) {
203 						WRT16_IO_REG(ha, hccr,
204 						    HC_CLR_RISC_INT);
205 					}
206 
207 					/* Get handle. */
208 					mbx >>= 4;
209 					stat = mbx & OSC_INDEX_MASK;
210 
211 					/* Validate handle. */
212 					sp = stat < MAX_OUTSTANDING_COMMANDS ?
213 					    ha->outstanding_cmds[stat] : NULL;
214 
215 					if (sp != NULL && (sp->handle & 0xfff)
216 					    == mbx) {
217 						ha->outstanding_cmds[stat] =
218 						    NULL;
219 						sp->handle = 0;
220 						sp->flags &=
221 						    ~SRB_IN_TOKEN_ARRAY;
222 
223 						/* Set completed status. */
224 						sp->flags |= SRB_ISP_COMPLETED;
225 
226 						/* Set completion status */
227 						sp->pkt->pkt_reason =
228 						    CS_COMPLETE;
229 
230 						ql_fast_fcp_post(sp);
231 					} else if (mbx !=
232 					    (QL_FCA_BRAND & 0xfff)) {
233 						if (sp == NULL) {
234 							EL(ha, "unknown IOCB"
235 							    " handle=%xh\n",
236 							    mbx);
237 						} else {
238 							EL(ha, "mismatch IOCB"
239 							    " handle pkt=%xh, "
240 							    "sp=%xh\n", mbx,
241 							    sp->handle & 0xfff);
242 						}
243 
244 						(void) ql_binary_fw_dump(ha,
245 						    FALSE);
246 
247 						if (!(ha->task_daemon_flags &
248 						    (ISP_ABORT_NEEDED |
249 						    ABORT_ISP_ACTIVE))) {
250 							EL(ha, "ISP Invalid "
251 							    "handle, "
252 							    "isp_abort_needed"
253 							    "\n");
254 							set_flags |=
255 							    ISP_ABORT_NEEDED;
256 						}
257 					}
258 				}
259 			}
260 
261 			if (stat == 0) {
262 				/* Check for mailbox interrupt. */
263 				mbx = RD16_IO_REG(ha, semaphore);
264 				if (mbx & BIT_0) {
265 					/* Release mailbox registers. */
266 					WRT16_IO_REG(ha, semaphore, 0);
267 
268 					/* Get mailbox data. */
269 					mbx = RD16_IO_REG(ha, mailbox[0]);
270 					if (mbx > 0x3fff && mbx < 0x8000) {
271 						ql_mbx_completion(ha, mbx,
272 						    &set_flags, &reset_flags,
273 						    intr_loop);
274 					} else if (mbx > 0x7fff &&
275 					    mbx < 0xc000) {
276 						ql_async_event(ha, mbx,
277 						    &isr_done_q, &set_flags,
278 						    &reset_flags, intr_loop);
279 					} else {
280 						EL(ha, "UNKNOWN interrupt "
281 						    "type\n");
282 						intr = B_TRUE;
283 					}
284 				} else {
285 					ha->isp_rsp_index = RD16_IO_REG(ha,
286 					    resp_in);
287 
288 					if (ha->isp_rsp_index !=
289 					    ha->rsp_ring_index) {
290 						ql_response_pkt(ha,
291 						    &isr_done_q, &set_flags,
292 						    &reset_flags, intr_loop);
293 					} else if (++spurious_intr ==
294 					    MAX_SPURIOUS_INTR) {
295 						/*
296 						 * Process excessive
297 						 * spurious interrupts
298 						 */
299 						ql_spurious_intr(ha,
300 						    intr_loop);
301 						EL(ha, "excessive spurious "
302 						    "interrupts, "
303 						    "isp_abort_needed\n");
304 						set_flags |= ISP_ABORT_NEEDED;
305 					} else {
306 						intr = B_TRUE;
307 					}
308 				}
309 			}
310 
311 			/* Clear RISC interrupt */
312 			if (intr || intr_loop == 0) {
313 				intr = B_FALSE;
314 				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
315 			}
316 
317 			if (set_flags != 0 || reset_flags != 0) {
318 				TASK_DAEMON_LOCK(ha);
319 				ha->task_daemon_flags |= set_flags;
320 				ha->task_daemon_flags &= ~reset_flags;
321 				TASK_DAEMON_UNLOCK(ha);
322 				set_flags = 0;
323 				reset_flags = 0;
324 				daemon = B_TRUE;
325 			}
326 		}
327 	} else {
328 		while ((stat = RD32_IO_REG(ha, intr_info_lo)) & RH_RISC_INT) {
329 			/* Capture FW defined interrupt info */
330 			mbx = MSW(stat);
331 
332 			/* Reset idle timer. */
333 			ha->idle_timer = 0;
334 			rval = DDI_INTR_CLAIMED;
335 			if (intr_loop) {
336 				intr_loop--;
337 			}
338 
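			/*
			 * For 23xx and later chips, the low bits of the
			 * interrupt status register select the interrupt
			 * type; the upper 16 bits (captured in mbx above)
			 * carry firmware-defined data such as mailbox 0 or
			 * a fast post handle.
			 */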
339 			switch (stat & 0x1ff) {
340 			case ROM_MBX_SUCCESS:
341 			case ROM_MBX_ERR:
342 				ql_mbx_completion(ha, mbx, &set_flags,
343 				    &reset_flags, intr_loop);
344 
345 				/* Release mailbox registers. */
346 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
347 					WRT16_IO_REG(ha, semaphore, 0);
348 				}
349 				break;
350 
351 			case MBX_SUCCESS:
352 			case MBX_ERR:
353 				/* Sun FW, Release mailbox registers. */
354 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
355 					WRT16_IO_REG(ha, semaphore, 0);
356 				}
357 				ql_mbx_completion(ha, mbx, &set_flags,
358 				    &reset_flags, intr_loop);
359 				break;
360 
361 			case ASYNC_EVENT:
362 				/* Sun FW, Release mailbox registers. */
363 				if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
364 					WRT16_IO_REG(ha, semaphore, 0);
365 				}
366 				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
367 				    &set_flags, &reset_flags, intr_loop);
368 				break;
369 
370 			case RESP_UPDATE:
371 				if (mbx != ha->rsp_ring_index) {
372 					ha->isp_rsp_index = mbx;
373 					ql_response_pkt(ha, &isr_done_q,
374 					    &set_flags, &reset_flags,
375 					    intr_loop);
376 				} else if (++spurious_intr ==
377 				    MAX_SPURIOUS_INTR) {
378 					/* Process excessive spurious intr. */
379 					ql_spurious_intr(ha, intr_loop);
380 					EL(ha, "excessive spurious "
381 					    "interrupts, isp_abort_needed\n");
382 					set_flags |= ISP_ABORT_NEEDED;
383 				} else {
384 					intr = B_TRUE;
385 				}
386 				break;
387 
388 			case SCSI_FAST_POST_16:
389 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
390 				ql_async_event(ha, stat, &isr_done_q,
391 				    &set_flags, &reset_flags, intr_loop);
392 				break;
393 
394 			case SCSI_FAST_POST_32:
395 				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
396 				ql_async_event(ha, stat, &isr_done_q,
397 				    &set_flags, &reset_flags, intr_loop);
398 				break;
399 
400 			case CTIO_FAST_POST:
401 				stat = (stat & 0xffff0000) |
402 				    MBA_CTIO_COMPLETION;
403 				ql_async_event(ha, stat, &isr_done_q,
404 				    &set_flags, &reset_flags, intr_loop);
405 				break;
406 
407 			case IP_FAST_POST_XMT:
408 				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
409 				ql_async_event(ha, stat, &isr_done_q,
410 				    &set_flags, &reset_flags, intr_loop);
411 				break;
412 
413 			case IP_FAST_POST_RCV:
414 				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
415 				ql_async_event(ha, stat, &isr_done_q,
416 				    &set_flags, &reset_flags, intr_loop);
417 				break;
418 
419 			case IP_FAST_POST_BRD:
420 				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
421 				ql_async_event(ha, stat, &isr_done_q,
422 				    &set_flags, &reset_flags, intr_loop);
423 				break;
424 
425 			case IP_FAST_POST_RCV_ALN:
426 				stat = (stat & 0xffff0000) |
427 				    MBA_IP_HDR_DATA_SPLIT;
428 				ql_async_event(ha, stat, &isr_done_q,
429 				    &set_flags, &reset_flags, intr_loop);
430 				break;
431 
432 			case ATIO_UPDATE:
433 				EL(ha, "unsupported ATIO queue update"
434 				    " interrupt, status=%xh\n", stat);
435 				intr = B_TRUE;
436 				break;
437 
438 			case ATIO_RESP_UPDATE:
439 				EL(ha, "unsupported ATIO response queue "
440 				    "update interrupt, status=%xh\n", stat);
441 				intr = B_TRUE;
442 				break;
443 
444 			default:
445 				ql_handle_uncommon_risc_intr(ha, stat,
446 				    &set_flags);
447 				intr = B_TRUE;
448 				break;
449 			}
450 
451 			/* Clear RISC interrupt */
452 			if (intr || intr_loop == 0) {
453 				intr = B_FALSE;
454 				CFG_IST(ha, CFG_CTRL_242581) ?
455 				    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
456 				    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
457 			}
458 
459 			if (set_flags != 0 || reset_flags != 0) {
460 				TASK_DAEMON_LOCK(ha);
461 				ha->task_daemon_flags |= set_flags;
462 				ha->task_daemon_flags &= ~reset_flags;
463 				TASK_DAEMON_UNLOCK(ha);
464 				set_flags = 0;
465 				reset_flags = 0;
466 				daemon = B_TRUE;
467 			}
468 
469 			if (ha->flags & PARITY_ERROR) {
470 				EL(ha, "parity/pause exit\n");
471 				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
472 				break;
473 			}
474 		}
475 	}
476 
477 	/* Process claimed interrupts during polls. */
478 	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
479 		ha->intr_claimed = B_FALSE;
480 		rval = DDI_INTR_CLAIMED;
481 	}
482 
483 	/* Release interrupt lock. */
484 	INTR_UNLOCK(ha);
485 
486 	if (daemon) {
487 		ql_awaken_task_daemon(ha, NULL, 0, 0);
488 	}
489 
490 	if (isr_done_q.first != NULL) {
491 		ql_done(isr_done_q.first);
492 	}
493 
494 	if (rval == DDI_INTR_CLAIMED) {
495 		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
496 		ha->xioctl->TotalInterrupts++;
497 	} else {
498 		/*EMPTY*/
499 		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
500 		    ha->instance);
501 	}
502 
503 	QL_PM_LOCK(ha);
504 	ha->busy--;
505 	QL_PM_UNLOCK(ha);
506 
507 	return (rval);
508 }
509 
510 /*
511  * ql_handle_uncommon_risc_intr
512  *	Handle an uncommon RISC interrupt.
513  *
514  * Input:
515  *	ha:		adapter state pointer.
516  *	stat:		interrupt status
517  *
518  * Context:
519  *	Interrupt or Kernel context, no mailbox commands allowed.
520  */
521 static void
522 ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
523     uint32_t *set_flags)
524 {
525 	uint16_t	hccr_reg;
526 
527 	hccr_reg = RD16_IO_REG(ha, hccr);
528 
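	/*
	 * A paused RISC, or any of the parity-related bits in the HCCR,
	 * indicates an internal parity/pause condition. The handler below
	 * captures a firmware dump (resetting the chip if the dump fails),
	 * disables ISP interrupts and requests an ISP abort.
	 */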
529 	if (stat & RH_RISC_PAUSED ||
530 	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {
531 
532 		ADAPTER_STATE_LOCK(ha);
533 		ha->flags |= PARITY_ERROR;
534 		ADAPTER_STATE_UNLOCK(ha);
535 
536 		if (ha->parity_pause_errors == 0 ||
537 		    ha->parity_hccr_err != hccr_reg ||
538 		    ha->parity_stat_err != stat) {
539 			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
540 			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
541 			    ha->instance, hccr_reg, stat,
542 			    ha->parity_pause_errors);
543 			ha->parity_hccr_err = hccr_reg;
544 			ha->parity_stat_err = stat;
545 		}
546 
547 		EL(ha, "parity/pause error, isp_abort_needed\n");
548 
549 		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
550 			ql_reset_chip(ha);
551 		}
552 
553 		if (ha->parity_pause_errors == 0) {
554 			(void) ql_flash_errlog(ha, FLASH_ERRLOG_PARITY_ERR,
555 			    0, MSW(stat), LSW(stat));
556 		}
557 
558 		if (ha->parity_pause_errors < 0xffffffff) {
559 			ha->parity_pause_errors++;
560 		}
561 
562 		*set_flags |= ISP_ABORT_NEEDED;
563 
564 		/* Disable ISP interrupts. */
565 		WRT16_IO_REG(ha, ictrl, 0);
566 		ADAPTER_STATE_LOCK(ha);
567 		ha->flags &= ~INTERRUPTS_ENABLED;
568 		ADAPTER_STATE_UNLOCK(ha);
569 	} else {
570 		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
571 		    stat, hccr_reg);
572 	}
573 }
574 
575 /*
576  * ql_spurious_intr
577  *	Inform Solaris of spurious interrupts.
578  *
579  * Input:
580  *	ha:		adapter state pointer.
581  *	intr_clr:	early interrupt clear
582  *
583  * Context:
584  *	Interrupt or Kernel context, no mailbox commands allowed.
585  */
586 static void
587 ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
588 {
589 	ddi_devstate_t	state;
590 
591 	EL(ha, "Spurious interrupt\n");
592 
593 	/* Disable ISP interrupts. */
594 	WRT16_IO_REG(ha, ictrl, 0);
595 	ADAPTER_STATE_LOCK(ha);
596 	ha->flags &= ~INTERRUPTS_ENABLED;
597 	ADAPTER_STATE_UNLOCK(ha);
598 
599 	/* Clear RISC interrupt */
600 	if (intr_clr) {
601 		CFG_IST(ha, CFG_CTRL_242581) ?
602 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
603 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
604 	}
605 
606 	state = ddi_get_devstate(ha->dip);
607 	if (state == DDI_DEVSTATE_UP) {
608 		/*EMPTY*/
609 		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
610 		    DDI_DEVICE_FAULT, "spurious interrupts");
611 	}
612 }
613 
614 /*
615  * ql_mbx_completion
616  *	Processes mailbox completions.
617  *
618  * Input:
619  *	ha:		adapter state pointer.
620  *	mb0:		Mailbox 0 contents.
621  *	set_flags:	task daemon flags to set.
622  *	reset_flags:	task daemon flags to reset.
623  *	intr_clr:	early interrupt clear
624  *
625  * Context:
626  *	Interrupt context.
627  */
628 /* ARGSUSED */
629 static void
630 ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
631     uint32_t *reset_flags, int intr_clr)
632 {
633 	uint32_t	index;
634 	uint16_t	cnt;
635 
636 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
637 
638 	/* Load return mailbox registers. */
639 	MBX_REGISTER_LOCK(ha);
640 
641 	if (ha->mcp != NULL) {
642 		ha->mcp->mb[0] = mb0;
643 		index = ha->mcp->in_mb & ~MBX_0;
644 
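		/*
		 * in_mb is a bit mask of the mailbox registers that hold
		 * return data for this command; walk the mask and capture
		 * each implicated register (mb[0] was read by the caller).
		 */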
645 		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
646 			index >>= 1;
647 			if (index & MBX_0) {
648 				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
649 				    mailbox[cnt]);
650 			}
651 		}
652 
653 	} else {
654 		EL(ha, "mcp == NULL\n");
655 	}
656 
657 	if (intr_clr) {
658 		/* Clear RISC interrupt. */
659 		CFG_IST(ha, CFG_CTRL_242581) ?
660 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
661 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
662 	}
663 
664 	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
665 	if (ha->flags & INTERRUPTS_ENABLED) {
666 		cv_broadcast(&ha->cv_mbx_intr);
667 	}
668 
669 	MBX_REGISTER_UNLOCK(ha);
670 
671 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
672 }
673 
674 /*
675  * ql_async_event
676  *	Processes asynchronous events.
677  *
678  * Input:
679  *	ha:		adapter state pointer.
680  *	mbx:		Mailbox 0 register.
681  *	done_q:		head pointer to done queue.
682  *	set_flags:	task daemon flags to set.
683  *	reset_flags:	task daemon flags to reset.
684  *	intr_clr:	early interrupt clear
685  *
686  * Context:
687  *	Interrupt or Kernel context, no mailbox commands allowed.
688  */
689 static void
690 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
691     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
692 {
693 	uint32_t		handle;
694 	uint32_t		index;
695 	uint16_t		cnt;
696 	uint16_t		mb[MAX_MBOX_COUNT];
697 	ql_srb_t		*sp;
698 	port_id_t		s_id;
699 	ql_tgt_t		*tq;
700 	boolean_t		intr = B_TRUE;
701 	ql_adapter_state_t	*vha;
702 
703 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
704 
705 	/* Setup to process fast completion. */
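	/*
	 * Fast post variants encode the IOCB handle in the event code
	 * and/or mailbox registers; they are normalized here to
	 * MBA_SCSI_COMPLETION with the recovered handle.
	 */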
706 	mb[0] = LSW(mbx);
707 	switch (mb[0]) {
708 	case MBA_SCSI_COMPLETION:
709 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox[1]),
710 		    RD16_IO_REG(ha, mailbox[2]));
711 		break;
712 
713 	case MBA_CMPLT_1_16BIT:
714 		handle = MSW(mbx);
715 		mb[0] = MBA_SCSI_COMPLETION;
716 		break;
717 
718 	case MBA_CMPLT_1_32BIT:
719 		handle = SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
720 		mb[0] = MBA_SCSI_COMPLETION;
721 		break;
722 
723 	case MBA_CTIO_COMPLETION:
724 	case MBA_IP_COMPLETION:
725 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
726 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2])) :
727 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox[2]));
728 		mb[0] = MBA_SCSI_COMPLETION;
729 		break;
730 
731 	default:
732 		break;
733 	}
734 
735 	/* Handle asynchronous event */
736 	switch (mb[0]) {
737 	case MBA_SCSI_COMPLETION:
738 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
739 		    ha->instance);
740 
741 		if (intr_clr) {
742 			/* Clear RISC interrupt */
743 			CFG_IST(ha, CFG_CTRL_242581) ?
744 			    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
745 			    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
746 			intr = B_FALSE;
747 		}
748 
749 		if ((ha->flags & ONLINE) == 0) {
750 			break;
751 		}
752 
753 		/* Get handle. */
754 		index = handle & OSC_INDEX_MASK;
755 
756 		/* Validate handle. */
757 		sp = index < MAX_OUTSTANDING_COMMANDS ?
758 		    ha->outstanding_cmds[index] : NULL;
759 
760 		if (sp != NULL && sp->handle == handle) {
761 			ha->outstanding_cmds[index] = NULL;
762 			sp->handle = 0;
763 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
764 
765 			/* Set completed status. */
766 			sp->flags |= SRB_ISP_COMPLETED;
767 
768 			/* Set completion status */
769 			sp->pkt->pkt_reason = CS_COMPLETE;
770 
771 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
772 				/* Place block on done queue */
773 				ql_add_link_b(done_q, &sp->cmd);
774 			} else {
775 				ql_fast_fcp_post(sp);
776 			}
777 		} else if (handle != QL_FCA_BRAND) {
778 			if (sp == NULL) {
779 				EL(ha, "%xh unknown IOCB handle=%xh\n",
780 				    mb[0], handle);
781 			} else {
782 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
783 				    "sp=%xh\n", mb[0], handle, sp->handle);
784 			}
785 
786 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
787 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
788 			    RD16_IO_REG(ha, mailbox[1]),
789 			    RD16_IO_REG(ha, mailbox[2]),
790 			    RD16_IO_REG(ha, mailbox[3]),
791 			    RD16_IO_REG(ha, mailbox[6]),
792 			    RD16_IO_REG(ha, mailbox[7]));
793 
794 			(void) ql_binary_fw_dump(ha, FALSE);
795 
796 			if (!(ha->task_daemon_flags &
797 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
798 				EL(ha, "%xh ISP Invalid handle, "
799 				    "isp_abort_needed\n", mb[0]);
800 				*set_flags |= ISP_ABORT_NEEDED;
801 			}
802 		}
803 		break;
804 
805 	case MBA_RESET:		/* Reset */
806 		EL(ha, "%xh Reset received\n", mb[0]);
807 		*set_flags |= RESET_MARKER_NEEDED;
808 		break;
809 
810 	case MBA_SYSTEM_ERR:		/* System Error */
811 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
812 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
813 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
814 
815 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
816 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
817 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
818 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
819 		    RD16_IO_REG(ha, mailbox[4]), RD16_IO_REG(ha, mailbox[5]),
820 		    RD16_IO_REG(ha, mailbox[6]), RD16_IO_REG(ha, mailbox[7]),
821 		    RD16_IO_REG(ha, mailbox[8]), RD16_IO_REG(ha, mailbox[9]),
822 		    RD16_IO_REG(ha, mailbox[10]), RD16_IO_REG(ha, mailbox[11]),
823 		    RD16_IO_REG(ha, mailbox[12]));
824 
825 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
826 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
827 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
828 		    mb[0], RD16_IO_REG(ha, mailbox[13]),
829 		    RD16_IO_REG(ha, mailbox[14]), RD16_IO_REG(ha, mailbox[15]),
830 		    RD16_IO_REG(ha, mailbox[16]), RD16_IO_REG(ha, mailbox[17]),
831 		    RD16_IO_REG(ha, mailbox[18]), RD16_IO_REG(ha, mailbox[19]),
832 		    RD16_IO_REG(ha, mailbox[20]), RD16_IO_REG(ha, mailbox[21]),
833 		    RD16_IO_REG(ha, mailbox[22]),
834 		    RD16_IO_REG(ha, mailbox[23]));
835 
836 		if (ha->reg_off->mbox_cnt > 24) {
837 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
838 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
839 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
840 			    RD16_IO_REG(ha, mailbox[24]),
841 			    RD16_IO_REG(ha, mailbox[25]),
842 			    RD16_IO_REG(ha, mailbox[26]),
843 			    RD16_IO_REG(ha, mailbox[27]),
844 			    RD16_IO_REG(ha, mailbox[28]),
845 			    RD16_IO_REG(ha, mailbox[29]),
846 			    RD16_IO_REG(ha, mailbox[30]),
847 			    RD16_IO_REG(ha, mailbox[31]));
848 		}
849 
850 		(void) ql_binary_fw_dump(ha, FALSE);
851 
852 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
853 		    mb[2], mb[3]);
854 
855 		*set_flags |= ISP_ABORT_NEEDED;
856 		ha->xioctl->ControllerErrorCount++;
857 		break;
858 
859 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
860 		EL(ha, "%xh Request Transfer Error received, "
861 		    "isp_abort_needed\n", mb[0]);
862 
863 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
864 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
865 		    RD16_IO_REG(ha, mailbox[3]));
866 
867 		*set_flags |= ISP_ABORT_NEEDED;
868 		ha->xioctl->ControllerErrorCount++;
869 		break;
870 
871 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
872 		EL(ha, "%xh Response Transfer Error received,"
873 		    " isp_abort_needed\n", mb[0]);
874 
875 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
876 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]),
877 		    RD16_IO_REG(ha, mailbox[3]));
878 
879 		*set_flags |= ISP_ABORT_NEEDED;
880 		ha->xioctl->ControllerErrorCount++;
881 		break;
882 
883 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
884 		EL(ha, "%xh Request Queue Wake-up received\n",
885 		    mb[0]);
886 		break;
887 
888 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
889 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
890 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
891 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
892 
893 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
894 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
895 
896 		switch (mb[1]) {
897 		case MLA_LOGIN_OPERATIONAL_FW:
898 			ADAPTER_STATE_LOCK(ha);
899 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
900 			ADAPTER_STATE_UNLOCK(ha);
901 			break;
902 		case MLA_PANIC_RECOVERY:
903 		case MLA_LOGIN_DIAGNOSTIC_FW:
904 		case MLA_LOGIN_GOLDEN_FW:
905 		case MLA_REJECT_RESPONSE:
906 		default:
907 			break;
908 		}
909 		break;
910 
911 	case MBA_LIP_F8:	/* Received a LIP F8. */
912 	case MBA_LIP_RESET:	/* LIP reset occurred. */
913 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
914 	/* case MBA_DCBX_STARTED: */
915 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
916 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
917 			    "\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
918 			    RD16_IO_REG(ha, mailbox[2]));
919 		} else {
920 			EL(ha, "%xh LIP received\n", mb[0]);
921 		}
922 
923 		ADAPTER_STATE_LOCK(ha);
924 		ha->flags &= ~POINT_TO_POINT;
925 		ADAPTER_STATE_UNLOCK(ha);
926 
927 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
928 			*set_flags |= LOOP_DOWN;
929 		}
930 		ql_port_state(ha, FC_STATE_OFFLINE,
931 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
932 
933 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
934 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
935 		}
936 
937 		ha->adapter_stats->lip_count++;
938 
939 		/* Update AEN queue. */
940 		ha->xioctl->TotalLipResets++;
941 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
942 			ql_enqueue_aen(ha, mb[0], NULL);
943 		}
944 		break;
945 
946 	case MBA_LOOP_UP:
947 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
948 		    CFG_CTRL_242581))) {
949 			mb[1] = RD16_IO_REG(ha, mailbox[1]);
950 			if (mb[1] == IIDMA_RATE_1GB) {		/* 1GB */
951 				ha->state = FC_PORT_STATE_MASK(
952 				    ha->state) | FC_STATE_1GBIT_SPEED;
953 				index = 1;
954 			} else if (mb[1] == IIDMA_RATE_2GB) {	/* 2GB */
955 				ha->state = FC_PORT_STATE_MASK(
956 				    ha->state) | FC_STATE_2GBIT_SPEED;
957 				index = 2;
958 			} else if (mb[1] == IIDMA_RATE_4GB) {	/* 4GB */
959 				ha->state = FC_PORT_STATE_MASK(
960 				    ha->state) | FC_STATE_4GBIT_SPEED;
961 				index = 4;
962 			} else if (mb[1] == IIDMA_RATE_8GB) {	/* 8GB */
963 				ha->state = FC_PORT_STATE_MASK(
964 				    ha->state) | FC_STATE_8GBIT_SPEED;
965 				index = 8;
966 			} else if (mb[1] == IIDMA_RATE_10GB) {	/* 10GB */
967 				ha->state = FC_PORT_STATE_MASK(
968 				    ha->state) | FC_STATE_10GBIT_SPEED;
969 				index = 10;
970 			} else {
971 				ha->state = FC_PORT_STATE_MASK(
972 				    ha->state);
973 				index = 0;
974 			}
975 		} else {
976 			ha->state = FC_PORT_STATE_MASK(ha->state) |
977 			    FC_STATE_FULL_SPEED;
978 			index = 1;
979 		}
980 
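		/* Propagate the negotiated link speed to all virtual ports. */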
981 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
982 			vha->state = FC_PORT_STATE_MASK(vha->state) |
983 			    FC_PORT_SPEED_MASK(ha->state);
984 		}
985 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
986 
987 		/* Update AEN queue. */
988 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
989 			ql_enqueue_aen(ha, mb[0], NULL);
990 		}
991 		break;
992 
993 	case MBA_LOOP_DOWN:
994 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, "
995 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
996 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
997 
998 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
999 			*set_flags |= LOOP_DOWN;
1000 		}
1001 		ql_port_state(ha, FC_STATE_OFFLINE,
1002 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1003 
1004 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1005 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1006 		}
1007 
1008 		if (CFG_IST(ha, CFG_CTRL_2581)) {
1009 			ha->sfp_stat = RD16_IO_REG(ha, mailbox[2]);
1010 		}
1011 
1012 		/* Update AEN queue. */
1013 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1014 			ql_enqueue_aen(ha, mb[0], NULL);
1015 		}
1016 		break;
1017 
1018 	case MBA_PORT_UPDATE:
1019 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1020 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1021 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1022 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1023 
1024 		/* Locate port state structure. */
1025 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1026 			if (vha->vp_index == LSB(mb[3])) {
1027 				break;
1028 			}
1029 		}
1030 		if (vha == NULL) {
1031 			break;
1032 		}
1033 		/*
1034 		 * In N_Port-to-N_Port topology the firmware provides a port
1035 		 * database entry at loop_id 0x7fe, which we use to
1036 		 * acquire the port's WWPN.
1037 		 */
1038 		if ((mb[1] != 0x7fe) &&
1039 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1040 		    (CFG_IST(ha, CFG_CTRL_242581) &&
1041 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1042 			EL(ha, "%xh Port Database Update, Login/Logout "
1043 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1044 			    mb[0], mb[1], mb[2], mb[3]);
1045 		} else {
1046 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1047 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1048 			    mb[3]);
1049 			*set_flags |= LOOP_RESYNC_NEEDED;
1050 			*set_flags &= ~LOOP_DOWN;
1051 			*reset_flags |= LOOP_DOWN;
1052 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1053 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1054 			TASK_DAEMON_LOCK(ha);
1055 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1056 			vha->task_daemon_flags &= ~LOOP_DOWN;
1057 			TASK_DAEMON_UNLOCK(ha);
1058 			ADAPTER_STATE_LOCK(ha);
1059 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1060 			ADAPTER_STATE_UNLOCK(ha);
1061 		}
1062 
1063 		/* Update AEN queue. */
1064 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1065 			ql_enqueue_aen(ha, mb[0], NULL);
1066 		}
1067 		break;
1068 
1069 	case MBA_RSCN_UPDATE:
1070 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1071 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1072 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1073 		    RD16_IO_REG(ha, mailbox[3]) : 0);
1074 
1075 		/* Locate port state structure. */
1076 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1077 			if (vha->vp_index == LSB(mb[3])) {
1078 				break;
1079 			}
1080 		}
1081 
1082 		if (vha == NULL) {
1083 			break;
1084 		}
1085 
1086 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1087 		    MSB(mb[2]) == vha->d_id.b.area &&
1088 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1089 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1090 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1091 		} else {
1092 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1093 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1094 			if (FC_PORT_STATE_MASK(vha->state) !=
1095 			    FC_STATE_OFFLINE) {
1096 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1097 				TASK_DAEMON_LOCK(ha);
1098 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1099 				TASK_DAEMON_UNLOCK(ha);
1100 				*set_flags |= RSCN_UPDATE_NEEDED;
1101 			}
1102 		}
1103 
1104 		/* Update AEN queue. */
1105 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1106 			ql_enqueue_aen(ha, mb[0], NULL);
1107 		}
1108 		break;
1109 
1110 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1111 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1112 		    RD16_IO_REG(ha, mailbox[1]));
1113 		break;
1114 
1115 	case MBA_IP_RECEIVE:
1116 	case MBA_IP_BROADCAST:
1117 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1118 		mb[2] = RD16_IO_REG(ha, mailbox[2]);
1119 		mb[3] = RD16_IO_REG(ha, mailbox[3]);
1120 
1121 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1122 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1123 
1124 		/* Locate device queue. */
1125 		s_id.b.al_pa = LSB(mb[2]);
1126 		s_id.b.area = MSB(mb[2]);
1127 		s_id.b.domain = LSB(mb[1]);
1128 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1129 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1130 			break;
1131 		}
1132 
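		/*
		 * Compute how many unsolicited buffer segments make up this
		 * sequence: the receive buffer size comes from the IP init
		 * control block, and the remaining mailbox registers
		 * (10 and up) carry the receive buffer handles.
		 */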
1133 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1134 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1135 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1136 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1137 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1138 
1139 		tq->ub_sequence_length = mb[3];
1140 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1141 		if (mb[3] % cnt) {
1142 			tq->ub_total_seg_cnt++;
1143 		}
1144 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1145 
1146 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1147 		    index++) {
1148 			mb[index] = RD16_IO_REG(ha, mailbox[index]);
1149 		}
1150 
1151 		tq->ub_seq_id = ++ha->ub_seq_id;
1152 		tq->ub_seq_cnt = 0;
1153 		tq->ub_frame_ro = 0;
1154 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1155 		    (CFG_IST(ha, CFG_CTRL_242581) ? BROADCAST_24XX_HDL :
1156 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1157 		ha->rcv_dev_q = tq;
1158 
1159 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1160 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1161 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1162 			    QL_SUCCESS) {
1163 				EL(ha, "ql_ub_frame_hdr failed, "
1164 				    "isp_abort_needed\n");
1165 				*set_flags |= ISP_ABORT_NEEDED;
1166 				break;
1167 			}
1168 		}
1169 		break;
1170 
1171 	case MBA_IP_LOW_WATER_MARK:
1172 	case MBA_IP_RCV_BUFFER_EMPTY:
1173 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1174 		    mb[0]);
1175 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1176 		break;
1177 
1178 	case MBA_IP_HDR_DATA_SPLIT:
1179 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1180 		break;
1181 
1182 	case MBA_POINT_TO_POINT:
1183 	/* case MBA_DCBX_COMPLETED: */
1184 		if (CFG_IST(ha, CFG_CTRL_81XX)) {
1185 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1186 		} else {
1187 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1188 		}
1189 		ADAPTER_STATE_LOCK(ha);
1190 		ha->flags |= POINT_TO_POINT;
1191 		ADAPTER_STATE_UNLOCK(ha);
1192 		break;
1193 
1194 	case MBA_FCF_CONFIG_ERROR:
1195 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh, "
1196 		    "mbx2=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1197 		    RD16_IO_REG(ha, mailbox[2]));
1198 		break;
1199 
1200 	case MBA_DCBX_PARAM_UPDATE:
1201 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh, "
1202 		    "mbx2=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1203 		    RD16_IO_REG(ha, mailbox[2]));
1204 		break;
1205 
1206 	case MBA_CHG_IN_CONNECTION:
1207 		mb[1] = RD16_IO_REG(ha, mailbox[1]);
1208 		if (mb[1] == 2) {
1209 			EL(ha, "%xh Change In Connection received, "
1210 			    "mbx1=%xh\n",  mb[0], mb[1]);
1211 			ADAPTER_STATE_LOCK(ha);
1212 			ha->flags &= ~POINT_TO_POINT;
1213 			ADAPTER_STATE_UNLOCK(ha);
1214 			if (ha->topology & QL_N_PORT) {
1215 				ha->topology = (uint8_t)(ha->topology &
1216 				    ~QL_N_PORT);
1217 				ha->topology = (uint8_t)(ha->topology |
1218 				    QL_NL_PORT);
1219 			}
1220 		} else {
1221 			EL(ha, "%xh Change In Connection received, "
1222 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1223 			*set_flags |= ISP_ABORT_NEEDED;
1224 		}
1225 		break;
1226 
1227 	case MBA_ZIO_UPDATE:
1228 		EL(ha, "%xh ZIO response received\n", mb[0]);
1229 
1230 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1231 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1232 		intr = B_FALSE;
1233 		break;
1234 
1235 	case MBA_PORT_BYPASS_CHANGED:
1236 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1237 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1238 		/*
1239 		 * Event generated when the port bypass state of the
1240 		 * Crystal+ changes.
1241 		 * Mailbox 1:	Bit 0 - External.
1242 		 *		Bit 2 - Internal.
1243 		 * When the bit is 0, the port is bypassed.
1244 		 *
1245 		 * For now we will generate a LIP for all cases.
1246 		 */
1247 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1248 		break;
1249 
1250 	case MBA_RECEIVE_ERROR:
1251 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1252 		    mb[0], RD16_IO_REG(ha, mailbox[1]),
1253 		    RD16_IO_REG(ha, mailbox[2]));
1254 		break;
1255 
1256 	case MBA_LS_RJT_SENT:
1257 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1258 		    RD16_IO_REG(ha, mailbox[1]));
1259 		break;
1260 
1261 	case MBA_FW_RESTART_COMP:
1262 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1263 		    mb[0], RD16_IO_REG(ha, mailbox[1]));
1264 		break;
1265 
1266 	case MBA_IDC_COMPLETE:
1267 		EL(ha, "%xh Inter-driver communication complete received, "
1268 		    "mbx1=%xh, mbx2=%xh\n", mb[0],
1269 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1270 		break;
1271 
1272 	case MBA_IDC_NOTIFICATION:
1273 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox[1]);
1274 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox[2]);
1275 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox[3]);
1276 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox[4]);
1277 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox[5]);
1278 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox[6]);
1279 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox[7]);
1280 		EL(ha, "%xh Inter-driver communication request notification "
1281 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1282 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1283 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1284 		    ha->idc_mb[6], ha->idc_mb[7]);
1285 		*set_flags |= IDC_ACK_NEEDED;
1286 		break;
1287 
1288 	case MBA_IDC_TIME_EXTENDED:
1289 		EL(ha, "%xh Inter-driver communication time extended received,"
1290 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1291 		    RD16_IO_REG(ha, mailbox[1]), RD16_IO_REG(ha, mailbox[2]));
1292 		break;
1293 
1294 	default:
1295 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1296 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox[1]),
1297 		    RD16_IO_REG(ha, mailbox[2]), RD16_IO_REG(ha, mailbox[3]));
1298 		break;
1299 	}
1300 
1301 	/* Clear RISC interrupt */
1302 	if (intr && intr_clr) {
1303 		CFG_IST(ha, CFG_CTRL_242581) ?
1304 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1305 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1306 	}
1307 
1308 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1309 }
1310 
1311 /*
1312  * ql_fast_fcp_post
1313  *	Fast path for good SCSI I/O completion.
1314  *
1315  * Input:
1316  *	sp:	SRB pointer.
1317  *
1318  * Context:
1319  *	Interrupt or Kernel context, no mailbox commands allowed.
1320  */
1321 static void
1322 ql_fast_fcp_post(ql_srb_t *sp)
1323 {
1324 	ql_adapter_state_t	*ha = sp->ha;
1325 	ql_lun_t		*lq = sp->lun_queue;
1326 	ql_tgt_t		*tq = lq->target_queue;
1327 
1328 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1329 
1330 	ASSERT(sp->flags & SRB_FCP_CMD_PKT && ha &&
1331 	    sp->pkt->pkt_reason == CS_COMPLETE);
1332 
1333 	/* Acquire device queue lock. */
1334 	DEVICE_QUEUE_LOCK(tq);
1335 
1336 	/* Decrement outstanding commands on device. */
1337 	if (tq->outcnt != 0) {
1338 		tq->outcnt--;
1339 	}
1340 
1341 	if (sp->flags & SRB_FCP_CMD_PKT) {
1342 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
1343 			/*
1344 			 * Clear the flag for this LUN so that
1345 			 * untagged commands can be submitted
1346 			 * for it.
1347 			 */
1348 			lq->flags &= ~LQF_UNTAGGED_PENDING;
1349 		}
1350 
1351 		if (lq->lun_outcnt != 0) {
1352 			lq->lun_outcnt--;
1353 		}
1354 	}
1355 
1356 	/* Reset port down retry count on good completion. */
1357 	tq->port_down_retry_count = ha->port_down_retry_count;
1358 	tq->qfull_retry_count = ha->qfull_retry_count;
1359 
1360 	/* Remove command from watchdog queue. */
1361 	if (sp->flags & SRB_WATCHDOG_ENABLED) {
1362 		ql_remove_link(&tq->wdg, &sp->wdg);
1363 		sp->flags &= ~SRB_WATCHDOG_ENABLED;
1364 	}
1365 
1366 	if (lq->cmd.first != NULL) {
1367 		ql_next(ha, lq);
1368 	} else {
1369 		/* Release LU queue specific lock. */
1370 		DEVICE_QUEUE_UNLOCK(tq);
1371 		if (ha->pha->pending_cmds.first != NULL) {
1372 			ql_start_iocb(ha, NULL);
1373 		}
1374 	}
1375 
1376 	/* Sync buffers if required.  */
1377 	if (sp->flags & SRB_MS_PKT) {
1378 		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
1379 		    DDI_DMA_SYNC_FORCPU);
1380 	}
1381 
1382 	/* Map ISP completion codes. */
1383 	sp->pkt->pkt_expln = FC_EXPLN_NONE;
1384 	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
1385 	sp->pkt->pkt_state = FC_PKT_SUCCESS;
1386 
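	/*
	 * The interrupt lock is dropped around the completion callback so
	 * the upper layer is not called with the ISR lock held.
	 */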
1387 	/* Now call the pkt completion callback */
1388 	if (sp->flags & SRB_POLL) {
1389 		sp->flags &= ~SRB_POLL;
1390 	} else if (sp->pkt->pkt_comp) {
1391 		INTR_UNLOCK(ha);
1392 		(*sp->pkt->pkt_comp)(sp->pkt);
1393 		INTR_LOCK(ha);
1394 	}
1395 
1396 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1397 }
1398 
1399 /*
1400  * ql_response_pkt
1401  *	Processes response entry.
1402  *
1403  * Input:
1404  *	ha:		adapter state pointer.
1405  *	done_q:		head pointer to done queue.
1406  *	set_flags:	task daemon flags to set.
1407  *	reset_flags:	task daemon flags to reset.
1408  *	intr_clr:	early interrupt clear
1409  *
1410  * Context:
1411  *	Interrupt or Kernel context, no mailbox commands allowed.
1412  */
1413 static void
1414 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1415     uint32_t *reset_flags, int intr_clr)
1416 {
1417 	response_t	*pkt;
1418 	uint32_t	dma_sync_size_1 = 0;
1419 	uint32_t	dma_sync_size_2 = 0;
1420 	int		status = 0;
1421 
1422 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1423 
1424 	/* Clear RISC interrupt */
1425 	if (intr_clr) {
1426 		CFG_IST(ha, CFG_CTRL_242581) ?
1427 		    WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT) :
1428 		    WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1429 	}
1430 
1431 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1432 		EL(ha, "index error = %xh, isp_abort_needed\n",
1433 		    ha->isp_rsp_index);
1434 		*set_flags |= ISP_ABORT_NEEDED;
1435 		return;
1436 	}
1437 
1438 	if ((ha->flags & ONLINE) == 0) {
1439 		QL_PRINT_3(CE_CONT, "(%d): not online, done\n", ha->instance);
1440 		return;
1441 	}
1442 
1443 	/* Calculate size of response queue entries to sync. */
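	/*
	 * Three cases: the new entries lie entirely after the current ring
	 * index, they run exactly to the end of the ring, or they wrap, in
	 * which case the tail and the head of the ring are synced
	 * separately.
	 */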
1444 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1445 		dma_sync_size_1 = (uint32_t)
1446 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1447 		    RESPONSE_ENTRY_SIZE);
1448 	} else if (ha->isp_rsp_index == 0) {
1449 		dma_sync_size_1 = (uint32_t)
1450 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1451 		    RESPONSE_ENTRY_SIZE);
1452 	} else {
1453 		/* Responses wrap around the Q */
1454 		dma_sync_size_1 = (uint32_t)
1455 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1456 		    RESPONSE_ENTRY_SIZE);
1457 		dma_sync_size_2 = (uint32_t)
1458 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1459 	}
1460 
1461 	/* Sync DMA buffer. */
1462 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1463 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1464 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1465 	    DDI_DMA_SYNC_FORKERNEL);
1466 	if (dma_sync_size_2) {
1467 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1468 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1469 		    DDI_DMA_SYNC_FORKERNEL);
1470 	}
1471 
1472 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1473 		pkt = ha->response_ring_ptr;
1474 
1475 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1476 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1477 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1478 		    RESPONSE_ENTRY_SIZE);
1479 
1480 		/* Adjust ring index. */
1481 		ha->rsp_ring_index++;
1482 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1483 			ha->rsp_ring_index = 0;
1484 			ha->response_ring_ptr = ha->response_ring_bp;
1485 		} else {
1486 			ha->response_ring_ptr++;
1487 		}
1488 
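		/*
		 * If a status continuation chain was being accumulated and
		 * this entry is not a continuation, the held SRB is complete
		 * and can be queued for the done routine.
		 */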
1489 		/* Process packet. */
1490 		if (ha->status_srb != NULL && pkt->entry_type !=
1491 		    STATUS_CONT_TYPE) {
1492 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1493 			ha->status_srb = NULL;
1494 		}
1495 
1496 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_242581) ?
1497 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1498 
1499 		if (pkt->entry_status != 0) {
1500 			ql_error_entry(ha, pkt, done_q, set_flags,
1501 			    reset_flags);
1502 		} else {
1503 			switch (pkt->entry_type) {
1504 			case STATUS_TYPE:
1505 				status |= CFG_IST(ha, CFG_CTRL_242581) ?
1506 				    ql_24xx_status_entry(ha,
1507 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1508 				    reset_flags) :
1509 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1510 				    done_q, set_flags, reset_flags);
1511 				break;
1512 			case STATUS_CONT_TYPE:
1513 				ql_status_cont_entry(ha,
1514 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1515 				    reset_flags);
1516 				break;
1517 			case IP_TYPE:
1518 			case IP_A64_TYPE:
1519 			case IP_CMD_TYPE:
1520 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1521 				    set_flags, reset_flags);
1522 				break;
1523 			case IP_RECEIVE_TYPE:
1524 				ql_ip_rcv_entry(ha,
1525 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1526 				    reset_flags);
1527 				break;
1528 			case IP_RECEIVE_CONT_TYPE:
1529 				ql_ip_rcv_cont_entry(ha,
1530 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1531 				    set_flags, reset_flags);
1532 				break;
1533 			case IP_24XX_RECEIVE_TYPE:
1534 				ql_ip_24xx_rcv_entry(ha,
1535 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1536 				    set_flags, reset_flags);
1537 				break;
1538 			case MS_TYPE:
1539 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1540 				    set_flags, reset_flags);
1541 				break;
1542 			case REPORT_ID_TYPE:
1543 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1544 				    done_q, set_flags, reset_flags);
1545 				break;
1546 			case ELS_PASSTHRU_TYPE:
1547 				ql_els_passthru_entry(ha,
1548 				    (els_passthru_entry_rsp_t *)pkt,
1549 				    done_q, set_flags, reset_flags);
1550 				break;
1551 			case IP_BUF_POOL_TYPE:
1552 			case MARKER_TYPE:
1553 			case VP_MODIFY_TYPE:
1554 			case VP_CONTROL_TYPE:
1555 				break;
1556 			default:
1557 				EL(ha, "Unknown IOCB entry type=%xh\n",
1558 				    pkt->entry_type);
1559 				break;
1560 			}
1561 		}
1562 	}
1563 
1564 	/* Inform RISC of processed responses. */
1565 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1566 
1567 	/* A CS_RESET status was received; delay for a possible async event. */
1568 	if (status & BIT_0) {
1569 		drv_usecwait(500000);
1570 	}
1571 
1572 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1573 }
1574 
1575 /*
1576  * ql_error_entry
1577  *	Processes error entry.
1578  *
1579  * Input:
1580  *	ha:		adapter state pointer.
1581  *	pkt:		entry pointer.
1582  *	done_q:		head pointer to done queue.
1583  *	set_flags:	task daemon flags to set.
1584  *	reset_flags:	task daemon flags to reset.
1585  *
1586  * Context:
1587  *	Interrupt or Kernel context, no mailbox commands allowed.
1588  */
1589 /* ARGSUSED */
1590 static void
1591 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1592     uint32_t *set_flags, uint32_t *reset_flags)
1593 {
1594 	ql_srb_t	*sp;
1595 	uint32_t	index, cnt;
1596 
1597 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1598 		EL(ha, "Aborted command\n");
1599 		return;
1600 	}
1601 
1602 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1603 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1604 
1605 	if (pkt->entry_status & BIT_6) {
1606 		EL(ha, "Request Queue DMA error\n");
1607 	} else if (pkt->entry_status & BIT_5) {
1608 		EL(ha, "Invalid Entry Order\n");
1609 	} else if (pkt->entry_status & BIT_4) {
1610 		EL(ha, "Invalid Entry Count\n");
1611 	} else if (pkt->entry_status & BIT_3) {
1612 		EL(ha, "Invalid Entry Parameter\n");
1613 	} else if (pkt->entry_status & BIT_2) {
1614 		EL(ha, "Invalid Entry Type\n");
1615 	} else if (pkt->entry_status & BIT_1) {
1616 		EL(ha, "Busy\n");
1617 	} else {
1618 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1619 	}
1620 
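	/*
	 * The low bits of the returned handle index the outstanding command
	 * array; the full handle must also match the SRB to guard against a
	 * stale or corrupted completion.
	 */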
1621 	/* Get handle. */
1622 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1623 	index = cnt & OSC_INDEX_MASK;
1624 
1625 	/* Validate handle. */
1626 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1627 	    NULL;
1628 
1629 	if (sp != NULL && sp->handle == cnt) {
1630 		ha->outstanding_cmds[index] = NULL;
1631 		sp->handle = 0;
1632 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1633 
1634 		/* Bad payload or header */
1635 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1636 			/* Bad payload or header, set error status. */
1637 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1638 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1639 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1640 		} else {
1641 			/* Set error status. */
1642 			sp->pkt->pkt_reason = CS_UNKNOWN;
1643 		}
1644 
1645 		/* Set completed status. */
1646 		sp->flags |= SRB_ISP_COMPLETED;
1647 
1648 		/* Place command on done queue. */
1649 		ql_add_link_b(done_q, &sp->cmd);
1650 
1651 	} else {
1652 		if (sp == NULL) {
1653 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1654 		} else {
1655 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1656 			    cnt, sp->handle);
1657 		}
1658 
1659 		(void) ql_binary_fw_dump(ha, FALSE);
1660 
1661 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1662 		    ABORT_ISP_ACTIVE))) {
1663 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1664 			*set_flags |= ISP_ABORT_NEEDED;
1665 		}
1666 	}
1667 
1668 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1669 }
1670 
1671 /*
1672  * ql_status_entry
1673  *	Processes received ISP2200-2300 status entry.
1674  *
1675  * Input:
1676  *	ha:		adapter state pointer.
1677  *	pkt:		entry pointer.
1678  *	done_q:		done queue pointer.
1679  *	set_flags:	task daemon flags to set.
1680  *	reset_flags:	task daemon flags to reset.
1681  *
1682  * Returns:
1683  *	BIT_0 = CS_RESET status received.
1684  *
1685  * Context:
1686  *	Interrupt or Kernel context, no mailbox commands allowed.
1687  */
1688 /* ARGSUSED */
1689 static int
1690 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1691     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1692 {
1693 	ql_srb_t		*sp;
1694 	uint32_t		index, cnt;
1695 	uint16_t		comp_status;
1696 	int			rval = 0;
1697 
1698 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1699 
1700 	/* Get handle. */
1701 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1702 	index = cnt & OSC_INDEX_MASK;
1703 
1704 	/* Validate handle. */
1705 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1706 	    NULL;
1707 
1708 	if (sp != NULL && sp->handle == cnt) {
1709 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1710 		    &pkt->comp_status);
1711 
1712 		/*
1713 		 * We don't care about SCSI QFULLs.
1714 		 */
1715 		if (comp_status == CS_QUEUE_FULL) {
1716 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1717 			    sp->lun_queue->target_queue->d_id.b24,
1718 			    sp->lun_queue->lun_no);
1719 			comp_status = CS_COMPLETE;
1720 		}
1721 
1722 		/*
1723 		 * 2300 firmware marks the completion status as data underrun
1724 		 * for SCSI queue-full conditions. Make it transport complete.
1725 		 */
1726 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1727 		    (comp_status == CS_DATA_UNDERRUN) &&
1728 		    (pkt->scsi_status_l != 0)) {
1729 			comp_status = CS_COMPLETE;
1730 		}
1731 
1732 		/*
1733 		 * Work around a T3 issue where no data is transferred
1734 		 * but a good status is returned.
1735 		 */
1736 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1737 		    comp_status == CS_COMPLETE &&
1738 		    pkt->scsi_status_l == 0 &&
1739 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1740 		    pkt->residual_length == 0 &&
1741 		    sp->fcp &&
1742 		    sp->fcp->fcp_data_len != 0 &&
1743 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1744 		    SF_DATA_OUT) {
1745 			comp_status = CS_ABORTED;
1746 		}
1747 
1748 		if (sp->flags & SRB_MS_PKT) {
1749 			/*
1750 			 * Ideally this should never be true, but a firmware
1751 			 * bug causes an MS IOCB received with invalid
1752 			 * parameters to be returned as a status entry
1753 			 * rather than as an MS entry type.
1754 			 */
1755 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1756 			    set_flags, reset_flags);
1757 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1758 			    ha->instance);
1759 			return (0);
1760 		}
1761 
1762 		ha->outstanding_cmds[index] = NULL;
1763 		sp->handle = 0;
1764 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1765 
1766 		/*
1767 		 * Fast path to good SCSI I/O completion
1768 		 */
1769 		if ((comp_status == CS_COMPLETE) &&
1770 		    (!pkt->scsi_status_l) &&
1771 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1772 			/* Set completed status. */
1773 			sp->flags |= SRB_ISP_COMPLETED;
1774 			sp->pkt->pkt_reason = comp_status;
1775 			ql_fast_fcp_post(sp);
1776 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1777 			    ha->instance);
1778 			return (0);
1779 		}
1780 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1781 		    reset_flags);
1782 	} else {
1783 		if (sp == NULL) {
1784 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
1785 		} else {
1786 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1787 			    cnt, sp->handle);
1788 		}
1789 
1790 		(void) ql_binary_fw_dump(ha, FALSE);
1791 
1792 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1793 		    ABORT_ISP_ACTIVE))) {
1794 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1795 			*set_flags |= ISP_ABORT_NEEDED;
1796 		}
1797 	}
1798 
1799 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1800 
1801 	return (rval);
1802 }
1803 
1804 /*
1805  * ql_24xx_status_entry
1806  *	Processes received ISP24xx status entry.
1807  *
1808  * Input:
1809  *	ha:		adapter state pointer.
1810  *	pkt:		entry pointer.
1811  *	done_q:		done queue pointer.
1812  *	set_flags:	task daemon flags to set.
1813  *	reset_flags:	task daemon flags to reset.
1814  *
1815  * Returns:
1816  *	BIT_0 = CS_RESET status received.
1817  *
1818  * Context:
1819  *	Interrupt or Kernel context, no mailbox commands allowed.
1820  */
1821 /* ARGSUSED */
1822 static int
1823 ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
1824     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1825 {
1826 	ql_srb_t		*sp;
1827 	uint32_t		index;
1828 	uint32_t		resp_identifier;
1829 	uint16_t		comp_status;
1830 	int			rval = 0;
1831 
1832 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1833 
1834 	/* Get the response identifier. */
1835 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1836 
1837 	/* extract the outstanding cmds index */
1838 	index = resp_identifier & OSC_INDEX_MASK;
1839 
1840 	/* Validate the index and get the associated srb pointer */
1841 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
1842 	    NULL;
1843 
1844 	if (sp != NULL && sp->handle == resp_identifier) {
1845 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1846 		    &pkt->comp_status);
1847 
1848 		/*
		 * We don't care about SCSI QFULLs.
1850 		 */
1851 		if (comp_status == CS_QUEUE_FULL) {
1852 			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1853 			    sp->lun_queue->target_queue->d_id.b24,
1854 			    sp->lun_queue->lun_no);
1855 			comp_status = CS_COMPLETE;
1856 		}
1857 
1858 		/*
1859 		 * 2300 firmware marks completion status as data underrun
1860 		 * for scsi qfulls. Make it transport complete.
1861 		 */
1862 		if ((comp_status == CS_DATA_UNDERRUN) &&
1863 		    (pkt->scsi_status_l != 0)) {
1864 			comp_status = CS_COMPLETE;
1865 		}
1866 
1867 		/*
1868 		 * Workaround T3 issue where we do not get any data xferred
1869 		 * but get back a good status.
1870 		 */
1871 		if (comp_status == CS_COMPLETE &&
1872 		    pkt->scsi_status_l == 0 &&
1873 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1874 		    pkt->residual_length != 0 &&
1875 		    sp->fcp &&
1876 		    sp->fcp->fcp_data_len != 0 &&
1877 		    sp->fcp->fcp_cntl.cntl_write_data) {
1878 			comp_status = CS_ABORTED;
1879 		}
1880 
1881 		if (sp->flags & SRB_MS_PKT) {
1882 			/*
1883 			 * Ideally it should never be true. But there
1884 			 * is a bug in FW which upon receiving invalid
1885 			 * parameters in MS IOCB returns it as
1886 			 * status entry and not as ms entry type.
1887 			 */
1888 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1889 			    set_flags, reset_flags);
1890 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1891 			    ha->instance);
1892 			return (0);
1893 		}
1894 
1895 		ha->outstanding_cmds[index] = NULL;
1896 		sp->handle = 0;
1897 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1898 
1899 		/*
1900 		 * Fast path to good SCSI I/O completion
1901 		 */
		if ((comp_status == CS_COMPLETE) &&
		    (!pkt->scsi_status_l) &&
		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1905 			/* Set completed status. */
1906 			sp->flags |= SRB_ISP_COMPLETED;
1907 			sp->pkt->pkt_reason = comp_status;
1908 			ql_fast_fcp_post(sp);
1909 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1910 			    ha->instance);
1911 			return (0);
1912 		}
1913 		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
1914 		    set_flags, reset_flags);
1915 	} else {
1916 		if (sp == NULL) {
1917 			EL(ha, "unknown IOCB handle=%xh\n", resp_identifier);
1918 		} else {
1919 			EL(sp->ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
1920 			    resp_identifier, sp->handle);
1921 		}
1922 
1923 		(void) ql_binary_fw_dump(ha, FALSE);
1924 
1925 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
1926 		    ABORT_ISP_ACTIVE))) {
1927 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
1928 			*set_flags |= ISP_ABORT_NEEDED;
1929 		}
1930 	}
1931 
1932 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1933 
1934 	return (rval);
1935 }
1936 
1937 /*
1938  * ql_status_error
1939  *	Processes received ISP status entry error.
1940  *
1941  * Input:
1942  *	ha:		adapter state pointer.
1943  *	sp:		SRB pointer.
1944  *	pkt:		entry pointer.
1945  *	done_q:		done queue pointer.
1946  *	set_flags:	task daemon flags to set.
1947  *	reset_flags:	task daemon flags to reset.
1948  *
1949  * Returns:
1950  *	BIT_0 = CS_RESET status received.
1951  *
1952  * Context:
1953  *	Interrupt or Kernel context, no mailbox commands allowed.
1954  */
1955 /* ARGSUSED */
1956 static int
1957 ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
1958     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1959 {
1960 	uint32_t		sense_sz = 0;
1961 	uint32_t		cnt;
1962 	ql_tgt_t		*tq;
1963 	fcp_rsp_t		*fcpr;
1964 	struct fcp_rsp_info	*rsp;
1965 	int			rval = 0;
1966 
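	/*
	 * Normalized view of the status entry; filled in below from either
	 * the 2xxx (pkt23) or 24xx (pkt24) layout so the remainder of this
	 * routine is layout independent.
	 */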
1967 	struct {
1968 		uint8_t		*rsp_info;
1969 		uint8_t		*req_sense_data;
1970 		uint32_t	residual_length;
1971 		uint32_t	fcp_residual_length;
1972 		uint32_t	rsp_info_length;
1973 		uint32_t	req_sense_length;
1974 		uint16_t	comp_status;
1975 		uint8_t		state_flags_l;
1976 		uint8_t		state_flags_h;
1977 		uint8_t		scsi_status_l;
1978 		uint8_t		scsi_status_h;
1979 	} sts;
1980 
1981 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1982 
1983 	if (CFG_IST(ha, CFG_CTRL_242581)) {
1984 		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;
1985 
1986 		/* Setup status. */
1987 		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1988 		    &pkt24->comp_status);
1989 		sts.scsi_status_l = pkt24->scsi_status_l;
1990 		sts.scsi_status_h = pkt24->scsi_status_h;
1991 
1992 		/* Setup firmware residuals. */
1993 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
1994 		    ddi_get32(ha->hba_buf.acc_handle,
1995 		    (uint32_t *)&pkt24->residual_length) : 0;
1996 
1997 		/* Setup FCP residuals. */
1998 		sts.fcp_residual_length = sts.scsi_status_h &
1999 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2000 		    ddi_get32(ha->hba_buf.acc_handle,
2001 		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;
2002 
2003 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2004 		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
2005 		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {
2006 
2007 			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
2008 			    sts.residual_length,
2009 			    pkt24->fcp_rsp_residual_count);
2010 			sts.scsi_status_h = (uint8_t)
2011 			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
2012 		}
2013 
2014 		/* Setup state flags. */
2015 		sts.state_flags_l = pkt24->state_flags_l;
2016 		sts.state_flags_h = pkt24->state_flags_h;
2017 
2018 		if (sp->fcp->fcp_data_len &&
2019 		    (sts.comp_status != CS_DATA_UNDERRUN ||
2020 		    sts.residual_length != sp->fcp->fcp_data_len)) {
2021 			sts.state_flags_h = (uint8_t)
2022 			    (sts.state_flags_h | SF_GOT_BUS |
2023 			    SF_GOT_TARGET | SF_SENT_CMD |
2024 			    SF_XFERRED_DATA | SF_GOT_STATUS);
2025 		} else {
2026 			sts.state_flags_h = (uint8_t)
2027 			    (sts.state_flags_h | SF_GOT_BUS |
2028 			    SF_GOT_TARGET | SF_SENT_CMD |
2029 			    SF_GOT_STATUS);
2030 		}
2031 		if (sp->fcp->fcp_cntl.cntl_write_data) {
2032 			sts.state_flags_l = (uint8_t)
2033 			    (sts.state_flags_l | SF_DATA_OUT);
2034 		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
2035 			sts.state_flags_l = (uint8_t)
2036 			    (sts.state_flags_l | SF_DATA_IN);
2037 		}
2038 		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
2039 			sts.state_flags_l = (uint8_t)
2040 			    (sts.state_flags_l | SF_HEAD_OF_Q);
2041 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
2042 			sts.state_flags_l = (uint8_t)
2043 			    (sts.state_flags_l | SF_ORDERED_Q);
2044 		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
2045 			sts.state_flags_l = (uint8_t)
2046 			    (sts.state_flags_l | SF_SIMPLE_Q);
2047 		}
2048 
2049 		/* Setup FCP response info. */
2050 		sts.rsp_info = &pkt24->rsp_sense_data[0];
2051 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2052 			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
2053 			    (uint32_t *)&pkt24->fcp_rsp_data_length);
2054 			if (sts.rsp_info_length >
2055 			    sizeof (struct fcp_rsp_info)) {
2056 				sts.rsp_info_length =
2057 				    sizeof (struct fcp_rsp_info);
2058 			}
2059 			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
2060 				ql_chg_endian(sts.rsp_info + cnt, 4);
2061 			}
2062 		} else {
2063 			sts.rsp_info_length = 0;
2064 		}
2065 
2066 		/* Setup sense data. */
2067 		sts.req_sense_data =
2068 		    &pkt24->rsp_sense_data[sts.rsp_info_length];
2069 		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2070 			sts.req_sense_length =
2071 			    ddi_get32(ha->hba_buf.acc_handle,
2072 			    (uint32_t *)&pkt24->fcp_sense_length);
2073 			sts.state_flags_h = (uint8_t)
2074 			    (sts.state_flags_h | SF_ARQ_DONE);
2075 			sense_sz = (uint32_t)
2076 			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
2077 			    (uintptr_t)sts.req_sense_data);
2078 			for (cnt = 0; cnt < sense_sz; cnt += 4) {
2079 				ql_chg_endian(sts.req_sense_data + cnt, 4);
2080 			}
2081 		} else {
2082 			sts.req_sense_length = 0;
2083 		}
2084 	} else {
2085 		/* Setup status. */
2086 		sts.comp_status = (uint16_t)ddi_get16(
2087 		    ha->hba_buf.acc_handle, &pkt23->comp_status);
2088 		sts.scsi_status_l = pkt23->scsi_status_l;
2089 		sts.scsi_status_h = pkt23->scsi_status_h;
2090 
2091 		/* Setup firmware residuals. */
2092 		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
2093 		    ddi_get32(ha->hba_buf.acc_handle,
2094 		    (uint32_t *)&pkt23->residual_length) : 0;
2095 
2096 		/* Setup FCP residuals. */
2097 		sts.fcp_residual_length = sts.scsi_status_h &
2098 		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
2099 		    sts.residual_length : 0;
2100 
2101 		/* Setup state flags. */
2102 		sts.state_flags_l = pkt23->state_flags_l;
2103 		sts.state_flags_h = pkt23->state_flags_h;
2104 
2105 		/* Setup FCP response info. */
2106 		sts.rsp_info = &pkt23->rsp_info[0];
2107 		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
2108 			sts.rsp_info_length = ddi_get16(
2109 			    ha->hba_buf.acc_handle,
2110 			    (uint16_t *)&pkt23->rsp_info_length);
2111 			if (sts.rsp_info_length >
2112 			    sizeof (struct fcp_rsp_info)) {
2113 				sts.rsp_info_length =
2114 				    sizeof (struct fcp_rsp_info);
2115 			}
2116 		} else {
2117 			sts.rsp_info_length = 0;
2118 		}
2119 
2120 		/* Setup sense data. */
2121 		sts.req_sense_data = &pkt23->req_sense_data[0];
2122 		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2123 		    ddi_get16(ha->hba_buf.acc_handle,
2124 		    (uint16_t *)&pkt23->req_sense_length) : 0;
2125 	}
2126 
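	/*
	 * Build the FCP response for the transport from the normalized
	 * status gathered above.
	 */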
2127 	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);
2128 
2129 	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
2130 	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
2131 	    sizeof (fcp_rsp_t));
2132 
2133 	tq = sp->lun_queue->target_queue;
2134 
2135 	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
2136 	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
2137 		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
2138 	}
2139 	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2140 		fcpr->fcp_u.fcp_status.sense_len_set = 1;
2141 	}
2142 	if (sts.scsi_status_h & FCP_RESID_OVER) {
2143 		fcpr->fcp_u.fcp_status.resid_over = 1;
2144 	}
2145 	if (sts.scsi_status_h & FCP_RESID_UNDER) {
2146 		fcpr->fcp_u.fcp_status.resid_under = 1;
2147 	}
2148 	fcpr->fcp_u.fcp_status.reserved_1 = 0;
2149 
2150 	/* Set ISP completion status */
2151 	sp->pkt->pkt_reason = sts.comp_status;
2152 
2153 	/* Update statistics. */
2154 	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
2155 	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {
2156 
2157 		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
2158 		if (sense_sz > sts.rsp_info_length) {
2159 			sense_sz = sts.rsp_info_length;
2160 		}
2161 
2162 		/* copy response information data. */
2163 		if (sense_sz) {
2164 			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
2165 			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
2166 		}
2167 		fcpr->fcp_response_len = sense_sz;
2168 
2169 		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
2170 		    fcpr->fcp_response_len);
2171 
2172 		switch (*(sts.rsp_info + 3)) {
2173 		case FCP_NO_FAILURE:
2174 			break;
2175 		case FCP_DL_LEN_MISMATCH:
2176 			ha->adapter_stats->d_stats[lobyte(
2177 			    tq->loop_id)].dl_len_mismatches++;
2178 			break;
2179 		case FCP_CMND_INVALID:
2180 			break;
2181 		case FCP_DATA_RO_MISMATCH:
2182 			ha->adapter_stats->d_stats[lobyte(
2183 			    tq->loop_id)].data_ro_mismatches++;
2184 			break;
2185 		case FCP_TASK_MGMT_NOT_SUPPTD:
2186 			break;
2187 		case FCP_TASK_MGMT_FAILED:
2188 			ha->adapter_stats->d_stats[lobyte(
2189 			    tq->loop_id)].task_mgmt_failures++;
2190 			break;
2191 		default:
2192 			break;
2193 		}
2194 	} else {
2195 		/*
2196 		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
2197 		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
2198 		 */
2199 		fcpr->fcp_response_len = 0;
2200 	}
2201 
2202 	/* Set reset status received. */
2203 	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
2204 		rval |= BIT_0;
2205 	}
2206 
2207 	if (!(tq->flags & TQF_TAPE_DEVICE) &&
2208 	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
2209 	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
2210 	    ha->task_daemon_flags & LOOP_DOWN) {
2211 		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
2212 		    tq->d_id.b24, sp->lun_queue->lun_no);
2213 
2214 		/* Set retry status. */
2215 		sp->flags |= SRB_RETRY;
2216 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2217 	    tq->port_down_retry_count != 0 &&
2218 	    (sts.comp_status == CS_INCOMPLETE ||
2219 	    sts.comp_status == CS_PORT_UNAVAILABLE ||
2220 	    sts.comp_status == CS_PORT_LOGGED_OUT ||
2221 	    sts.comp_status == CS_PORT_CONFIG_CHG ||
2222 	    sts.comp_status == CS_PORT_BUSY)) {
2223 		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
2224 		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
2225 		    tq->port_down_retry_count);
2226 
2227 		/* Set retry status. */
2228 		sp->flags |= SRB_RETRY;
2229 
2230 		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2231 			/* Acquire device queue lock. */
2232 			DEVICE_QUEUE_LOCK(tq);
2233 
2234 			tq->flags |= TQF_QUEUE_SUSPENDED;
2235 
2236 			/* Decrement port down count. */
2237 			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
2238 				tq->port_down_retry_count--;
2239 			}
2240 
2241 			DEVICE_QUEUE_UNLOCK(tq);
2242 
2243 			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
2244 			    == 0 &&
2245 			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
2246 			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
2247 				sp->ha->adapter_stats->d_stats[lobyte(
2248 				    tq->loop_id)].logouts_recvd++;
2249 				ql_send_logo(sp->ha, tq, done_q);
2250 			}
2251 
2252 			ADAPTER_STATE_LOCK(ha);
2253 			if (ha->port_retry_timer == 0) {
2254 				if ((ha->port_retry_timer =
2255 				    ha->port_down_retry_delay) == 0) {
2256 					*set_flags |=
2257 					    PORT_RETRY_NEEDED;
2258 				}
2259 			}
2260 			ADAPTER_STATE_UNLOCK(ha);
2261 		}
2262 	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
2263 	    (sts.comp_status == CS_RESET ||
2264 	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
2265 	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
2266 		if (sts.comp_status == CS_RESET) {
2267 			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
2268 			    tq->d_id.b24, sp->lun_queue->lun_no);
2269 		} else if (sts.comp_status == CS_QUEUE_FULL) {
2270 			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
2271 			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
2272 			    tq->qfull_retry_count);
2273 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2274 				tq->flags |= TQF_QUEUE_SUSPENDED;
2275 
2276 				tq->qfull_retry_count--;
2277 
2278 				ADAPTER_STATE_LOCK(ha);
2279 				if (ha->port_retry_timer == 0) {
2280 					if ((ha->port_retry_timer =
2281 					    ha->qfull_retry_delay) ==
2282 					    0) {
2283 						*set_flags |=
2284 						    PORT_RETRY_NEEDED;
2285 					}
2286 				}
2287 				ADAPTER_STATE_UNLOCK(ha);
2288 			}
2289 		} else {
2290 			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
2291 			    tq->d_id.b24, sp->lun_queue->lun_no);
2292 		}
2293 
2294 		/* Set retry status. */
2295 		sp->flags |= SRB_RETRY;
2296 	} else {
2297 		fcpr->fcp_resid =
2298 		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
2299 		    sp->fcp->fcp_data_len : sts.fcp_residual_length;
2300 
2301 		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
2302 		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {
2303 
2304 			if (sts.scsi_status_l == STATUS_CHECK) {
2305 				sp->pkt->pkt_reason = CS_COMPLETE;
2306 			} else {
2307 				EL(ha, "transport error - "
2308 				    "underrun & invalid resid\n");
2309 				EL(ha, "ssh=%xh, ssl=%xh\n",
2310 				    sts.scsi_status_h, sts.scsi_status_l);
2311 				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
2312 			}
2313 		}
2314 
2315 		/* Ignore firmware underrun error. */
2316 		if (sts.comp_status == CS_DATA_UNDERRUN &&
2317 		    (sts.scsi_status_h & FCP_RESID_UNDER ||
2318 		    (sts.scsi_status_l != STATUS_CHECK &&
2319 		    sts.scsi_status_l != STATUS_GOOD))) {
2320 			sp->pkt->pkt_reason = CS_COMPLETE;
2321 		}
2322 
2323 		if (sp->pkt->pkt_reason != CS_COMPLETE) {
2324 			ha->xioctl->DeviceErrorCount++;
2325 			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
2326 			    "\n", sts.comp_status, tq->d_id.b24,
2327 			    sp->lun_queue->lun_no);
2328 		}
2329 
2330 		/* Set target request sense data. */
2331 		if (sts.scsi_status_l == STATUS_CHECK) {
2332 			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
2333 
2334 				if (sp->pkt->pkt_reason == CS_COMPLETE &&
2335 				    sts.req_sense_data[2] != KEY_NO_SENSE &&
2336 				    sts.req_sense_data[2] !=
2337 				    KEY_UNIT_ATTENTION) {
2338 					ha->xioctl->DeviceErrorCount++;
2339 				}
2340 
2341 				sense_sz = sts.req_sense_length;
2342 
				/* Ensure the data does not exceed the buffer. */
2344 				if (sp->pkt->pkt_rsplen <=
2345 				    (uint32_t)sizeof (fcp_rsp_t) +
2346 				    fcpr->fcp_response_len) {
2347 					sp->request_sense_length = 0;
2348 				} else {
2349 					sp->request_sense_length = (uint32_t)
2350 					    (sp->pkt->pkt_rsplen -
2351 					    sizeof (fcp_rsp_t) -
2352 					    fcpr->fcp_response_len);
2353 				}
2354 
2355 				if (sense_sz <
2356 				    sp->request_sense_length) {
2357 					sp->request_sense_length =
2358 					    sense_sz;
2359 				}
2360 
2361 				sp->request_sense_ptr = (caddr_t)rsp;
2362 
2363 				sense_sz = (uint32_t)
2364 				    (((uintptr_t)pkt23 +
2365 				    sizeof (sts_entry_t)) -
2366 				    (uintptr_t)sts.req_sense_data);
2367 				if (sp->request_sense_length <
2368 				    sense_sz) {
2369 					sense_sz =
2370 					    sp->request_sense_length;
2371 				}
2372 
2373 				fcpr->fcp_sense_len = sense_sz;
2374 
2375 				/* Move sense data. */
2376 				ddi_rep_get8(ha->hba_buf.acc_handle,
2377 				    (uint8_t *)sp->request_sense_ptr,
2378 				    sts.req_sense_data,
2379 				    (size_t)sense_sz,
2380 				    DDI_DEV_AUTOINCR);
2381 
2382 				sp->request_sense_ptr += sense_sz;
2383 				sp->request_sense_length -= sense_sz;
2384 				if (sp->request_sense_length != 0) {
2385 					ha->status_srb = sp;
2386 				}
2387 			}
2388 
2389 			if (sense_sz != 0) {
2390 				EL(sp->ha, "check condition sense data, "
2391 				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
2392 				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
2393 				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
2394 				    sp->lun_queue->lun_no,
2395 				    sts.req_sense_data[0],
2396 				    sts.req_sense_data[1],
2397 				    sts.req_sense_data[2],
2398 				    sts.req_sense_data[3],
2399 				    sts.req_sense_data[4],
2400 				    sts.req_sense_data[5],
2401 				    sts.req_sense_data[6],
2402 				    sts.req_sense_data[7],
2403 				    sts.req_sense_data[8],
2404 				    sts.req_sense_data[9],
2405 				    sts.req_sense_data[10],
2406 				    sts.req_sense_data[11],
2407 				    sts.req_sense_data[12],
2408 				    sts.req_sense_data[13],
2409 				    sts.req_sense_data[14],
2410 				    sts.req_sense_data[15],
2411 				    sts.req_sense_data[16],
2412 				    sts.req_sense_data[17]);
2413 			} else {
2414 				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
2415 				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
2416 			}
2417 		}
2418 	}
2419 
2420 	/* Set completed status. */
2421 	sp->flags |= SRB_ISP_COMPLETED;
2422 
2423 	/* Place command on done queue. */
2424 	if (ha->status_srb == NULL) {
2425 		ql_add_link_b(done_q, &sp->cmd);
2426 	}
2427 
2428 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2429 
2430 	return (rval);
2431 }
2432 
2433 /*
2434  * ql_status_cont_entry
2435  *	Processes status continuation entry.
2436  *
2437  * Input:
2438  *	ha:		adapter state pointer.
2439  *	pkt:		entry pointer.
2440  *	done_q:		done queue pointer.
2441  *	set_flags:	task daemon flags to set.
2442  *	reset_flags:	task daemon flags to reset.
2443  *
2444  * Context:
2445  *	Interrupt or Kernel context, no mailbox commands allowed.
2446  */
2447 /* ARGSUSED */
2448 static void
2449 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2450     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2451 {
2452 	uint32_t	sense_sz, index;
2453 	ql_srb_t	*sp = ha->status_srb;
2454 
2455 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2456 
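	/*
	 * ha->status_srb is set by ql_status_error() when the sense data
	 * does not fit in the original status entry; each continuation
	 * entry delivers the next piece of that sense data.
	 */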
2457 	if (sp != NULL && sp->request_sense_length) {
2458 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2459 			sense_sz = sizeof (pkt->req_sense_data);
2460 		} else {
2461 			sense_sz = sp->request_sense_length;
2462 		}
2463 
2464 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2465 			for (index = 0; index < sense_sz; index += 4) {
2466 				ql_chg_endian((uint8_t *)
2467 				    &pkt->req_sense_data[0] + index, 4);
2468 			}
2469 		}
2470 
2471 		/* Move sense data. */
2472 		ddi_rep_get8(ha->hba_buf.acc_handle,
2473 		    (uint8_t *)sp->request_sense_ptr,
2474 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2475 		    DDI_DEV_AUTOINCR);
2476 
2477 		sp->request_sense_ptr += sense_sz;
2478 		sp->request_sense_length -= sense_sz;
2479 
2480 		/* Place command on done queue. */
2481 		if (sp->request_sense_length == 0) {
2482 			ql_add_link_b(done_q, &sp->cmd);
2483 			ha->status_srb = NULL;
2484 		}
2485 	}
2486 
2487 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2488 }
2489 
2490 /*
2491  * ql_ip_entry
2492  *	Processes received ISP IP entry.
2493  *
2494  * Input:
2495  *	ha:		adapter state pointer.
2496  *	pkt:		entry pointer.
2497  *	done_q:		done queue pointer.
2498  *	set_flags:	task daemon flags to set.
2499  *	reset_flags:	task daemon flags to reset.
2500  *
2501  * Context:
2502  *	Interrupt or Kernel context, no mailbox commands allowed.
2503  */
2504 /* ARGSUSED */
2505 static void
2506 ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
2507     uint32_t *set_flags, uint32_t *reset_flags)
2508 {
2509 	ql_srb_t	*sp;
2510 	uint32_t	index, cnt;
2511 	ql_tgt_t	*tq;
2512 
2513 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2514 
2515 	/* Get handle. */
2516 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2517 	index = cnt & OSC_INDEX_MASK;
2518 
2519 	/* Validate handle. */
2520 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
2521 	    NULL;
2522 
2523 	if (sp != NULL && sp->handle == cnt) {
2524 		ha->outstanding_cmds[index] = NULL;
2525 		sp->handle = 0;
2526 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2527 		tq = sp->lun_queue->target_queue;
2528 
2529 		/* Set ISP completion status */
2530 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2531 			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;
2532 
2533 			sp->pkt->pkt_reason = ddi_get16(
2534 			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
2535 		} else {
2536 			sp->pkt->pkt_reason = ddi_get16(
2537 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2538 		}
2539 
2540 		if (ha->task_daemon_flags & LOOP_DOWN) {
2541 			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
2542 			    tq->d_id.b24);
2543 
2544 			/* Set retry status. */
2545 			sp->flags |= SRB_RETRY;
2546 
2547 		} else if (tq->port_down_retry_count &&
2548 		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
2549 		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
2550 		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2551 		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2552 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2553 			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
2554 			    sp->pkt->pkt_reason, tq->d_id.b24,
2555 			    tq->port_down_retry_count);
2556 
2557 			/* Set retry status. */
2558 			sp->flags |= SRB_RETRY;
2559 
2560 			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
2561 			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
2562 				ha->adapter_stats->d_stats[lobyte(
2563 				    tq->loop_id)].logouts_recvd++;
2564 				ql_send_logo(ha, tq, done_q);
2565 			}
2566 
2567 			/* Acquire device queue lock. */
2568 			DEVICE_QUEUE_LOCK(tq);
2569 
2570 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2571 				tq->flags |= TQF_QUEUE_SUSPENDED;
2572 
2573 				tq->port_down_retry_count--;
2574 
2575 				ADAPTER_STATE_LOCK(ha);
2576 				if (ha->port_retry_timer == 0) {
2577 					if ((ha->port_retry_timer =
2578 					    ha->port_down_retry_delay) == 0) {
2579 						*set_flags |=
2580 						    PORT_RETRY_NEEDED;
2581 					}
2582 				}
2583 				ADAPTER_STATE_UNLOCK(ha);
2584 			}
2585 
2586 			/* Release device queue specific lock. */
2587 			DEVICE_QUEUE_UNLOCK(tq);
2588 
2589 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2590 			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);
2591 
2592 			/* Set retry status. */
2593 			sp->flags |= SRB_RETRY;
2594 		} else {
2595 			if (sp->pkt->pkt_reason != CS_COMPLETE) {
2596 				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
2597 				    sp->pkt->pkt_reason, tq->d_id.b24);
2598 			}
2599 		}
2600 
2601 		/* Set completed status. */
2602 		sp->flags |= SRB_ISP_COMPLETED;
2603 
2604 		ql_add_link_b(done_q, &sp->cmd);
2605 
2606 	} else {
2607 		if (sp == NULL) {
2608 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
2609 		} else {
2610 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
2611 			    cnt, sp->handle);
2612 		}
2613 
2614 		(void) ql_binary_fw_dump(ha, FALSE);
2615 
2616 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2617 		    ABORT_ISP_ACTIVE))) {
2618 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
2619 			*set_flags |= ISP_ABORT_NEEDED;
2620 		}
2621 	}
2622 
2623 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2624 }
2625 
2626 /*
2627  * ql_ip_rcv_entry
2628  *	Processes received ISP IP buffers entry.
2629  *
2630  * Input:
2631  *	ha:		adapter state pointer.
2632  *	pkt:		entry pointer.
2633  *	done_q:		done queue pointer.
2634  *	set_flags:	task daemon flags to set.
2635  *	reset_flags:	task daemon flags to reset.
2636  *
2637  * Context:
2638  *	Interrupt or Kernel context, no mailbox commands allowed.
2639  */
2640 /* ARGSUSED */
2641 static void
2642 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2643     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2644 {
2645 	port_id_t	s_id;
2646 	uint16_t	index;
2647 	uint8_t		cnt;
2648 	ql_tgt_t	*tq;
2649 
2650 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2651 
2652 	/* Locate device queue. */
2653 	s_id.b.al_pa = pkt->s_id[0];
2654 	s_id.b.area = pkt->s_id[1];
2655 	s_id.b.domain = pkt->s_id[2];
2656 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2657 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2658 		return;
2659 	}
2660 
2661 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2662 	    &pkt->seq_length);
2663 	tq->ub_total_seg_cnt = pkt->segment_count;
2664 	tq->ub_seq_id = ++ha->ub_seq_id;
2665 	tq->ub_seq_cnt = 0;
2666 	tq->ub_frame_ro = 0;
2667 	tq->ub_loop_id = pkt->loop_id;
2668 	ha->rcv_dev_q = tq;
2669 
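	/* Process each receive buffer handle in this sequence. */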
2670 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2671 	    tq->ub_total_seg_cnt; cnt++) {
2672 
2673 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2674 		    &pkt->buffer_handle[cnt]);
2675 
2676 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2677 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2678 			*set_flags |= ISP_ABORT_NEEDED;
2679 			break;
2680 		}
2681 	}
2682 
2683 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2684 }
2685 
2686 /*
2687  * ql_ip_rcv_cont_entry
2688  *	Processes received ISP IP buffers continuation entry.
2689  *
2690  * Input:
2691  *	ha:		adapter state pointer.
2692  *	pkt:		entry pointer.
2693  *	done_q:		done queue pointer.
2694  *	set_flags:	task daemon flags to set.
2695  *	reset_flags:	task daemon flags to reset.
2696  *
2697  * Context:
2698  *	Interrupt or Kernel context, no mailbox commands allowed.
2699  */
2700 /* ARGSUSED */
2701 static void
2702 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2703     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2704 {
2705 	uint16_t	index;
2706 	uint8_t		cnt;
2707 	ql_tgt_t	*tq;
2708 
2709 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2710 
2711 	if ((tq = ha->rcv_dev_q) == NULL) {
2712 		EL(ha, "No IP receive device\n");
2713 		return;
2714 	}
2715 
2716 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2717 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2718 
2719 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2720 		    &pkt->buffer_handle[cnt]);
2721 
2722 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2723 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2724 			*set_flags |= ISP_ABORT_NEEDED;
2725 			break;
2726 		}
2727 	}
2728 
2729 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2730 }
2731 
2732 /*
 * ql_ip_24xx_rcv_entry
2734  *	Processes received ISP24xx IP buffers entry.
2735  *
2736  * Input:
2737  *	ha:		adapter state pointer.
2738  *	pkt:		entry pointer.
2739  *	done_q:		done queue pointer.
2740  *	set_flags:	task daemon flags to set.
2741  *	reset_flags:	task daemon flags to reset.
2742  *
2743  * Context:
2744  *	Interrupt or Kernel context, no mailbox commands allowed.
2745  */
2746 /* ARGSUSED */
2747 static void
2748 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2749     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2750 {
2751 	port_id_t	s_id;
2752 	uint16_t	index;
2753 	uint8_t		cnt;
2754 	ql_tgt_t	*tq;
2755 
2756 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2757 
2758 	/* Locate device queue. */
2759 	s_id.b.al_pa = pkt->s_id[0];
2760 	s_id.b.area = pkt->s_id[1];
2761 	s_id.b.domain = pkt->s_id[2];
2762 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2763 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2764 		return;
2765 	}
2766 
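	/* Start of a new sequence; initialize the reassembly state. */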
2767 	if (tq->ub_total_seg_cnt == 0) {
2768 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2769 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2770 		tq->ub_total_seg_cnt = pkt->segment_count;
2771 		tq->ub_seq_id = ++ha->ub_seq_id;
2772 		tq->ub_seq_cnt = 0;
2773 		tq->ub_frame_ro = 0;
2774 		tq->ub_loop_id = (uint16_t)ddi_get16(
2775 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2776 	}
2777 
2778 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2779 	    tq->ub_total_seg_cnt; cnt++) {
2780 
2781 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2782 		    &pkt->buffer_handle[cnt]);
2783 
2784 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2785 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2786 			*set_flags |= ISP_ABORT_NEEDED;
2787 			break;
2788 		}
2789 	}
2790 
2791 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2792 }
2793 
2794 /*
2795  * ql_ms_entry
2796  *	Processes received Name/Management/CT Pass-Through entry.
2797  *
2798  * Input:
2799  *	ha:		adapter state pointer.
2800  *	pkt23:		entry pointer.
2801  *	done_q:		done queue pointer.
2802  *	set_flags:	task daemon flags to set.
2803  *	reset_flags:	task daemon flags to reset.
2804  *
2805  * Context:
2806  *	Interrupt or Kernel context, no mailbox commands allowed.
2807  */
2808 /* ARGSUSED */
2809 static void
2810 ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
2811     uint32_t *set_flags, uint32_t *reset_flags)
2812 {
2813 	ql_srb_t		*sp;
2814 	uint32_t		index, cnt;
2815 	ql_tgt_t		*tq;
2816 	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;
2817 
2818 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2819 
2820 	/* Get handle. */
2821 	cnt = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
2822 	index = cnt & OSC_INDEX_MASK;
2823 
2824 	/* Validate handle. */
2825 	sp = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
2826 	    NULL;
2827 
2828 	if (sp != NULL && sp->handle == cnt) {
2829 		if (!(sp->flags & SRB_MS_PKT)) {
2830 			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
2831 			    sp->flags);
2832 			*set_flags |= ISP_ABORT_NEEDED;
2833 			return;
2834 		}
2835 
2836 		ha->outstanding_cmds[index] = NULL;
2837 		sp->handle = 0;
2838 		sp->flags &= ~SRB_IN_TOKEN_ARRAY;
2839 		tq = sp->lun_queue->target_queue;
2840 
2841 		/* Set ISP completion status */
2842 		if (CFG_IST(ha, CFG_CTRL_242581)) {
2843 			sp->pkt->pkt_reason = ddi_get16(
2844 			    ha->hba_buf.acc_handle, &pkt24->status);
2845 		} else {
2846 			sp->pkt->pkt_reason = ddi_get16(
2847 			    ha->hba_buf.acc_handle, &pkt23->comp_status);
2848 		}
2849 
2850 		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
2851 		    sp->retry_count) {
2852 			EL(ha, "Resouce Unavailable Retry = %d\n",
2853 			    sp->retry_count);
2854 
2855 			/* Set retry status. */
2856 			sp->retry_count--;
2857 			sp->flags |= SRB_RETRY;
2858 
2859 			/* Acquire device queue lock. */
2860 			DEVICE_QUEUE_LOCK(tq);
2861 
2862 			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
2863 				tq->flags |= TQF_QUEUE_SUSPENDED;
2864 
2865 				ADAPTER_STATE_LOCK(ha);
2866 				if (ha->port_retry_timer == 0) {
2867 					ha->port_retry_timer = 2;
2868 				}
2869 				ADAPTER_STATE_UNLOCK(ha);
2870 			}
2871 
2872 			/* Release device queue specific lock. */
2873 			DEVICE_QUEUE_UNLOCK(tq);
2874 
2875 		} else if (tq->port_down_retry_count &&
2876 		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
2877 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
2878 			EL(ha, "Port Down Retry\n");
2879 
2880 			/* Set retry status. */
2881 			sp->flags |= SRB_RETRY;
2882 
2883 			/* Acquire device queue lock. */
2884 			DEVICE_QUEUE_LOCK(tq);
2885 
2886 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
2887 				tq->flags |= TQF_QUEUE_SUSPENDED;
2888 
2889 				tq->port_down_retry_count--;
2890 
2891 				ADAPTER_STATE_LOCK(ha);
2892 				if (ha->port_retry_timer == 0) {
2893 					if ((ha->port_retry_timer =
2894 					    ha->port_down_retry_delay) == 0) {
2895 						*set_flags |=
2896 						    PORT_RETRY_NEEDED;
2897 					}
2898 				}
2899 				ADAPTER_STATE_UNLOCK(ha);
2900 			}
2901 
2902 			/* Release device queue specific lock. */
2903 			DEVICE_QUEUE_UNLOCK(tq);
2904 
2905 		} else if (sp->pkt->pkt_reason == CS_RESET) {
2906 			EL(ha, "Reset Retry\n");
2907 
2908 			/* Set retry status. */
2909 			sp->flags |= SRB_RETRY;
2910 
2911 		} else if (CFG_IST(ha, CFG_CTRL_242581) &&
2912 		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
2913 			cnt = ddi_get32(ha->hba_buf.acc_handle,
2914 			    &pkt24->resp_byte_count);
2915 			if (cnt < sizeof (fc_ct_header_t)) {
2916 				EL(ha, "Data underrun\n");
2917 			} else {
2918 				sp->pkt->pkt_reason = CS_COMPLETE;
2919 			}
2920 
2921 		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
2922 			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
2923 		}
2924 
2925 		if (sp->pkt->pkt_reason == CS_COMPLETE) {
2926 			/*EMPTY*/
2927 			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
2928 			    ha->instance, sp->pkt->pkt_cmd[8],
2929 			    sp->pkt->pkt_cmd[9]);
2930 			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
2931 		}
2932 
		/*
		 * Convert the CT header byte order: the command header for
		 * the name server (d_id 0xfffffc), otherwise the response
		 * header.
		 */
2934 		if ((sp->flags & SRB_RETRY) == 0) {
2935 			tq->d_id.b24 == 0xfffffc ?
2936 			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
2937 			    sp->pkt->pkt_cmd, B_TRUE) :
2938 			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
2939 			    sp->pkt->pkt_resp, B_TRUE);
2940 		}
2941 
2942 		/* Set completed status. */
2943 		sp->flags |= SRB_ISP_COMPLETED;
2944 
2945 		/* Place command on done queue. */
2946 		ql_add_link_b(done_q, &sp->cmd);
2947 
2948 	} else {
2949 		if (sp == NULL) {
2950 			EL(ha, "unknown IOCB handle=%xh\n", cnt);
2951 		} else {
2952 			EL(ha, "mismatch IOCB handle pkt=%xh, sp=%xh\n",
2953 			    cnt, sp->handle);
2954 		}
2955 
2956 		(void) ql_binary_fw_dump(ha, FALSE);
2957 
2958 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
2959 		    ABORT_ISP_ACTIVE))) {
2960 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
2961 			*set_flags |= ISP_ABORT_NEEDED;
2962 		}
2963 	}
2964 
2965 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2966 }
2967 
2968 /*
2969  * ql_report_id_entry
 *	Processes received Report ID Acquisition entry.
2971  *
2972  * Input:
2973  *	ha:		adapter state pointer.
 *	pkt:		entry pointer.
2975  *	done_q:		done queue pointer.
2976  *	set_flags:	task daemon flags to set.
2977  *	reset_flags:	task daemon flags to reset.
2978  *
2979  * Context:
2980  *	Interrupt or Kernel context, no mailbox commands allowed.
2981  */
2982 /* ARGSUSED */
2983 static void
2984 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
2985     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2986 {
2987 	ql_adapter_state_t	*vha;
2988 
2989 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2990 
2991 	EL(ha, "format=%d, vp=%d, status=%d\n",
2992 	    pkt->format, pkt->vp_index, pkt->status);
2993 
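	/*
	 * Format 1 entries report the port ID acquired by a virtual port;
	 * schedule a loop resync for that VP when the acquisition succeeded
	 * or the port ID changed.
	 */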
2994 	if (pkt->format == 1) {
2995 		/* Locate port state structure. */
2996 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
2997 			if (vha->vp_index == pkt->vp_index) {
2998 				break;
2999 			}
3000 		}
3001 		if (vha != NULL && vha->vp_index != 0 &&
3002 		    (pkt->status == CS_COMPLETE ||
3003 		    pkt->status == CS_PORT_ID_CHANGE)) {
3004 			*set_flags |= LOOP_RESYNC_NEEDED;
3005 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3006 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3007 			TASK_DAEMON_LOCK(ha);
3008 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3009 			vha->task_daemon_flags &= ~LOOP_DOWN;
3010 			TASK_DAEMON_UNLOCK(ha);
3011 		}
3012 	}
3013 
3014 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3015 }
3016 
3017 /*
 * ql_els_passthru_entry
3019  *	Processes received ELS Pass-Through entry.
3020  *
3021  * Input:
3022  *	ha:		adapter state pointer.
 *	rsp:		entry pointer.
3024  *	done_q:		done queue pointer.
3025  *	set_flags:	task daemon flags to set.
3026  *	reset_flags:	task daemon flags to reset.
3027  *
3028  * Context:
3029  *	Interrupt or Kernel context, no mailbox commands allowed.
3030  */
3031 /* ARGSUSED */
3032 static void
3033 ql_els_passthru_entry(ql_adapter_state_t *ha, els_passthru_entry_rsp_t *rsp,
3034     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3035 {
	ql_tgt_t	*tq = NULL;
3037 	port_id_t	d_id, s_id;
3038 	ql_srb_t	*srb;
3039 	uint32_t	cnt, index;
3040 
3041 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3042 	/* Get handle. */
3043 	cnt = ddi_get32(ha->hba_buf.acc_handle, &rsp->handle);
3044 	index = cnt & OSC_INDEX_MASK;
3045 
3046 	/* Validate handle. */
3047 	srb = index < MAX_OUTSTANDING_COMMANDS ? ha->outstanding_cmds[index] :
3048 	    NULL;
3049 
	if (srb != NULL && srb->handle == cnt) {
		(void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORKERNEL);

3054 		if (!(srb->flags & SRB_ELS_PKT)) {
3055 			EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed",
3056 			    srb->flags);
3057 			*set_flags |= ISP_ABORT_NEEDED;
3058 			return;
3059 		}
3060 		ha->outstanding_cmds[index] = NULL;
3061 		srb->handle = 0;
3062 		srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3063 
3064 		/* Set ISP completion status */
3065 		srb->pkt->pkt_reason = ddi_get16(
3066 		    ha->hba_buf.acc_handle, &rsp->comp_status);
3067 
3068 		if (srb->pkt->pkt_reason != CS_COMPLETE) {
3069 			la_els_rjt_t	rjt;
3070 			EL(ha, "status err=%xh\n", srb->pkt->pkt_reason);
3071 
3072 			if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3073 				EL(ha, "e1=%xh e2=%xh\n",
3074 				    rsp->error_subcode1, rsp->error_subcode2);
3075 			}
3076 
3077 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3078 
3079 			/* Build RJT in the response. */
3080 			rjt.ls_code.ls_code = LA_ELS_RJT;
3081 			rjt.reason = FC_REASON_NO_CONNECTION;
3082 
3083 			ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3084 			    (uint8_t *)srb->pkt->pkt_resp,
3085 			    sizeof (rjt), DDI_DEV_AUTOINCR);
3086 
3087 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3088 			srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3089 		}
3090 
3091 		if (srb->pkt->pkt_reason == CS_COMPLETE) {
3092 			uint8_t		opcode;
3093 			uint16_t	loop_id;
3094 
3095 			/* Indicate ISP completion */
3096 			srb->flags |= SRB_ISP_COMPLETED;
3097 
3098 			loop_id = ddi_get16(ha->hba_buf.acc_handle,
3099 			    &rsp->n_port_hdl);
3100 
3101 			if (ha->topology & QL_N_PORT) {
3102 				/* create a target Q if there isn't one */
3103 				tq = ql_loop_id_to_queue(ha, loop_id);
3104 				if (tq == NULL) {
3105 					d_id.b.al_pa = rsp->d_id_7_0;
3106 					d_id.b.area = rsp->d_id_15_8;
3107 					d_id.b.domain = rsp->d_id_23_16;
3108 					/* Acquire adapter state lock. */
3109 					ADAPTER_STATE_LOCK(ha);
3110 
3111 					tq = ql_dev_init(ha, d_id, loop_id);
3112 					EL(ha, " tq = %x\n", tq);
3113 
3114 					ADAPTER_STATE_UNLOCK(ha);
3115 				}
3116 
3117 				/* on plogi success assume the chosen s_id */
3118 				opcode = ddi_get8(ha->hba_buf.acc_handle,
3119 				    &rsp->els_cmd_opcode);
3120 
3121 				EL(ha, "els_cmd_opcode=%x srb->pkt=%x\n",
3122 				    opcode, srb->pkt);
3123 
3124 				if (opcode == LA_ELS_PLOGI) {
3125 					s_id.b.al_pa = rsp->s_id_7_0;
3126 					s_id.b.area = rsp->s_id_15_8;
3127 					s_id.b.domain = rsp->s_id_23_16;
3128 
3129 					ha->d_id.b24 = s_id.b24;
3130 					EL(ha, "Set port's source ID %xh\n",
3131 					    ha->d_id.b24);
3132 				}
3133 			}
3134 			ql_isp_els_handle_rsp_endian(ha, srb);
3135 
3136 			if (ha != srb->ha) {
3137 				EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3138 			}
3139 
3140 			if (tq != NULL) {
3141 				tq->logout_sent = 0;
3142 				tq->flags &= ~TQF_NEED_AUTHENTICATION;
3143 
3144 				if (CFG_IST(ha, CFG_CTRL_242581)) {
3145 					tq->flags |= TQF_IIDMA_NEEDED;
3146 				}
				srb->pkt->pkt_state = FC_PKT_SUCCESS;
3148 			}
3149 		}
3150 		/* invoke the callback */
3151 		ql_awaken_task_daemon(ha, srb, 0, 0);
3152 	} else {
3153 		EL(ha, "unexpected IOCB handle=%xh\n", srb);
3154 
3155 		if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
3156 		    ABORT_ISP_ACTIVE))) {
3157 			EL(ha, "ISP Invalid handle, isp_abort_needed\n");
3158 			*set_flags |= ISP_ABORT_NEEDED;
3159 		}
3160 	}
3161 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3162 }
3163