1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2010 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 #pragma ident	"Copyright 2010 QLogic Corporation; ql_isr.c"
29 
30 /*
31  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32  *
33  * ***********************************************************************
34  * *									**
35  * *				NOTICE					**
36  * *		COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION		**
37  * *			ALL RIGHTS RESERVED				**
38  * *									**
39  * ***********************************************************************
40  *
41  */
42 
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_init.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52 
53 /*
54  * Local Function Prototypes.
55  */
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, uint32_t,
57     uint32_t *);
58 static void ql_spurious_intr(ql_adapter_state_t *, int);
59 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint32_t *,
60     uint32_t *, int);
61 static void ql_async_event(ql_adapter_state_t *, uint32_t, ql_head_t *,
62     uint32_t *, uint32_t *, int);
63 static void ql_fast_fcp_post(ql_srb_t *);
64 static void ql_response_pkt(ql_adapter_state_t *, ql_head_t *, uint32_t *,
65     uint32_t *, int);
66 static void ql_error_entry(ql_adapter_state_t *, response_t *, ql_head_t *,
67     uint32_t *, uint32_t *);
68 static int ql_status_entry(ql_adapter_state_t *, sts_entry_t *, ql_head_t *,
69     uint32_t *, uint32_t *);
70 static int ql_24xx_status_entry(ql_adapter_state_t *, sts_24xx_entry_t *,
71     ql_head_t *, uint32_t *, uint32_t *);
72 static int ql_status_error(ql_adapter_state_t *, ql_srb_t *, sts_entry_t *,
73     ql_head_t *, uint32_t *, uint32_t *);
74 static void ql_status_cont_entry(ql_adapter_state_t *, sts_cont_entry_t *,
75     ql_head_t *, uint32_t *, uint32_t *);
76 static void ql_ip_entry(ql_adapter_state_t *, ip_entry_t *, ql_head_t *,
77     uint32_t *, uint32_t *);
78 static void ql_ip_rcv_entry(ql_adapter_state_t *, ip_rcv_entry_t *,
79     ql_head_t *, uint32_t *, uint32_t *);
80 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *,
81     ip_rcv_cont_entry_t *, ql_head_t *, uint32_t *, uint32_t *);
82 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ip_rcv_24xx_entry_t *,
83     ql_head_t *, uint32_t *, uint32_t *);
84 static void ql_ms_entry(ql_adapter_state_t *, ms_entry_t *, ql_head_t *,
85     uint32_t *, uint32_t *);
86 static void ql_report_id_entry(ql_adapter_state_t *, report_id_1_t *,
87     ql_head_t *, uint32_t *, uint32_t *);
88 static void ql_els_passthru_entry(ql_adapter_state_t *,
89     els_passthru_entry_rsp_t *, ql_head_t *, uint32_t *, uint32_t *);
90 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *, uint32_t *,
91     uint32_t *, uint32_t *);
92 static void ql_signal_abort(ql_adapter_state_t *ha, uint32_t *set_flags);
93 
/*
 * Spurious interrupt tunables.
 *
 * ql_spurious_cnt:	number of consecutive response-queue interrupts with
 *			an unchanged ring index tolerated (non-2200 path)
 *			before the adapter is reported faulted and an ISP
 *			abort is requested.
 * ql_max_intr_loop:	upper bound on iterations of the ISR service loop
 *			per invocation, guaranteeing the handler returns
 *			even if the RISC interrupt line stays asserted.
 */
uint32_t	ql_spurious_cnt = 4;
uint32_t	ql_max_intr_loop = 16;
99 
/*
 * ql_isr
 *	Process all INTX intr types.
 *
 * Input:
 *	arg1:	adapter state pointer.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr(caddr_t arg1)
{
	/* Legacy INTx entry point: delegate to the common AIF handler. */
	return (ql_isr_aif(arg1, 0));
}
119 
/*
 * ql_isr_default
 *	Process unknown/unvectored intr types
 *
 * Input:
 *	arg1:	adapter state pointer.
 *	arg2:	interrupt vector.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_default(caddr_t arg1, caddr_t arg2)
{
	ql_adapter_state_t	*ha = (void *)arg1;

	/* Log the unexpected vector, then service it like any other AIF. */
	EL(ha, "isr_default called: idx=%x\n", arg2);
	return (ql_isr_aif(arg1, arg2));
}
143 
/*
 * ql_isr_aif
 *	Process mailbox and I/O command completions.
 *
 *	Common interrupt service routine for all interrupt delivery modes.
 *	Two hardware families are handled: the ISP2200 (legacy istatus/
 *	semaphore register model) and everything newer (risc2host status
 *	register model).  Completed I/O is queued on a local done queue and
 *	posted back to the transport only after the interrupt lock is
 *	dropped; task daemon flag changes are likewise accumulated and
 *	applied in batches.
 *
 * Input:
 *	arg:	adapter state pointer.
 *	intvec:	interrupt vector.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t intvec)
{
	uint16_t		mbx;
	uint32_t		stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint32_t		set_flags = 0;
	uint32_t		reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	int			spurious_intr = 0;
	boolean_t		intr = B_FALSE, daemon = B_FALSE;
	int			intr_loop = 4;
	boolean_t		clear_spurious = B_TRUE;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Refuse to touch the hardware unless the device is at full power;
	 * bump the busy count so power management won't lower the power
	 * level while we are in the handler.
	 */
	QL_PM_LOCK(ha);
	if (ha->power_level != PM_LEVEL_D0) {
		/*
		 * Looks like we are about to go down soon, exit early.
		 */
		QL_PM_UNLOCK(ha);
		QL_PRINT_3(CE_CONT, "(%d): power down exit\n", ha->instance);
		return (DDI_INTR_UNCLAIMED);
	}
	ha->busy++;
	QL_PM_UNLOCK(ha);

	/* Acquire interrupt lock. */
	INTR_LOCK(ha);

	if (CFG_IST(ha, CFG_CTRL_2200)) {
		/* ISP2200: poll istatus until the RISC interrupt deasserts. */
		while (RD16_IO_REG(ha, istatus) & RISC_INT) {
			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;
			/*
			 * intr_loop counts down the iterations in which the
			 * RISC interrupt may be cleared early (before event
			 * processing); once it reaches zero the clear is
			 * deferred to the bottom of the loop.
			 */
			if (intr_loop) {
				intr_loop--;
			}

			/* Special Fast Post 2200. */
			stat = 0;
			if (ha->task_daemon_flags & FIRMWARE_LOADED &&
			    ha->flags & ONLINE) {
				ql_srb_t	*sp;

				mbx = RD16_IO_REG(ha, mailbox_out[23]);

				if ((mbx & 3) == MBX23_SCSI_COMPLETION) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					if (intr_loop) {
						WRT16_IO_REG(ha, hccr,
						    HC_CLR_RISC_INT);
					}

					/* Get handle. */
					mbx >>= 4;
					stat = mbx & OSC_INDEX_MASK;

					/* Validate handle. */
					sp = stat < MAX_OUTSTANDING_COMMANDS ?
					    ha->outstanding_cmds[stat] : NULL;

					/*
					 * Only the low 12 bits of the handle
					 * are carried in the fast-post
					 * mailbox, so compare against the
					 * truncated SRB handle.
					 */
					if (sp != NULL && (sp->handle & 0xfff)
					    == mbx) {
						ha->outstanding_cmds[stat] =
						    NULL;
						sp->handle = 0;
						sp->flags &=
						    ~SRB_IN_TOKEN_ARRAY;

						/* Set completed status. */
						sp->flags |= SRB_ISP_COMPLETED;

						/* Set completion status */
						sp->pkt->pkt_reason =
						    CS_COMPLETE;

						ql_fast_fcp_post(sp);
					} else if (mbx !=
					    (QL_FCA_BRAND & 0xfff)) {
						/*
						 * Firmware handed back a
						 * handle we don't own --
						 * capture a firmware dump and
						 * schedule an ISP abort if
						 * one isn't already pending.
						 */
						if (sp == NULL) {
							EL(ha, "unknown IOCB"
							    " handle=%xh\n",
							    mbx);
						} else {
							EL(ha, "mismatch IOCB"
							    " handle pkt=%xh, "
							    "sp=%xh\n", mbx,
							    sp->handle & 0xfff);
						}

						(void) ql_binary_fw_dump(ha,
						    FALSE);

						if (!(ha->task_daemon_flags &
						    (ISP_ABORT_NEEDED |
						    ABORT_ISP_ACTIVE))) {
							EL(ha, "ISP Invalid "
							    "handle, "
							    "isp_abort_needed"
							    "\n");
							set_flags |=
							    ISP_ABORT_NEEDED;
						}
					}
				}
			}

			if (stat == 0) {
				/* Check for mailbox interrupt. */
				mbx = RD16_IO_REG(ha, semaphore);
				if (mbx & BIT_0) {
					/* Release mailbox registers. */
					WRT16_IO_REG(ha, semaphore, 0);

					/* Get mailbox data. */
					mbx = RD16_IO_REG(ha, mailbox_out[0]);
					/*
					 * 0x4000-0x7fff are mailbox command
					 * completion codes; 0x8000-0xbfff
					 * are asynchronous event codes.
					 */
					if (mbx > 0x3fff && mbx < 0x8000) {
						ql_mbx_completion(ha, mbx,
						    &set_flags, &reset_flags,
						    intr_loop);
					} else if (mbx > 0x7fff &&
					    mbx < 0xc000) {
						ql_async_event(ha, mbx,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else {
						EL(ha, "UNKNOWN interrupt "
						    "type\n");
						intr = B_TRUE;
					}
				} else {
					/* No semaphore: response queue work. */
					ha->isp_rsp_index = RD16_IO_REG(ha,
					    resp_in);

					if (ha->isp_rsp_index !=
					    ha->rsp_ring_index) {
						ql_response_pkt(ha,
						    &isr_done_q, &set_flags,
						    &reset_flags, intr_loop);
					} else if (++spurious_intr ==
					    MAX_SPURIOUS_INTR) {
						/*
						 * Process excessive
						 * spurious interrupts
						 */
						ql_spurious_intr(ha,
						    intr_loop);
						EL(ha, "excessive spurious "
						    "interrupts, "
						    "isp_abort_needed\n");
						set_flags |= ISP_ABORT_NEEDED;
					} else {
						intr = B_TRUE;
					}
				}
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			}

			/* Fold accumulated flag changes into the daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}
		}
	} else {
		/* Post-2200 adapters: decode the risc2host status register. */
		uint32_t	ql_max_intr_loop_cnt = 0;

		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			intr_loop = 1;
		}
		/*
		 * Bounded service loop; ql_max_intr_loop prevents a stuck
		 * interrupt line from wedging this CPU in the handler.
		 */
		while (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) &&
		    (++ql_max_intr_loop_cnt < ql_max_intr_loop)) {

			clear_spurious = B_TRUE;	/* assume ok */

			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			/* Reset idle timer. */
			ha->idle_timer = 0;
			rval = DDI_INTR_CLAIMED;

			if (CFG_IST(ha, CFG_CTRL_8021) &&
			    (RD32_IO_REG(ha, nx_risc_int) == 0 ||
			    intr_loop == 0)) {
				break;
			}

			if (intr_loop) {
				intr_loop--;
			}

			/* Low 9 bits of status select the interrupt type. */
			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);

				/* Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags, intr_loop);
				break;

			case ASYNC_EVENT:
				/* Sun FW, Release mailbox registers. */
				if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
					WRT16_IO_REG(ha, semaphore, 0);
				}
				ql_async_event(ha, (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case RESP_UPDATE:
				if (mbx != ha->rsp_ring_index) {
					ha->isp_rsp_index = mbx;
					ql_response_pkt(ha, &isr_done_q,
					    &set_flags, &reset_flags,
					    intr_loop);
				} else if (++spurious_intr ==
				    ql_spurious_cnt) {
					/* Process excessive spurious intr. */
					ql_spurious_intr(ha, intr_loop);
					EL(ha, "excessive spurious "
					    "interrupts, isp_abort_needed\n");
					set_flags |= ISP_ABORT_NEEDED;
					clear_spurious = B_FALSE;
				} else {
					QL_PRINT_10(CE_CONT, "(%d): response "
					    "ring index same as before\n",
					    ha->instance);
					intr = B_TRUE;
					clear_spurious = B_FALSE;
				}
				break;

			/*
			 * Fast-post variants: rewrite the status word into
			 * the equivalent async event code and let
			 * ql_async_event() complete the I/O.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, stat, &isr_done_q,
				    &set_flags, &reset_flags, intr_loop);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				intr = B_TRUE;
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, stat,
				    &set_flags);
				intr = B_TRUE;
				break;
			}

			/* Clear RISC interrupt */
			if (intr || intr_loop == 0) {
				intr = B_FALSE;
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
				} else {
					WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
				}
			}

			/* Fold accumulated flag changes into the daemon. */
			if (set_flags != 0 || reset_flags != 0) {
				TASK_DAEMON_LOCK(ha);
				ha->task_daemon_flags |= set_flags;
				ha->task_daemon_flags &= ~reset_flags;
				TASK_DAEMON_UNLOCK(ha);
				set_flags = 0;
				reset_flags = 0;
				daemon = B_TRUE;
			}

			/*
			 * A parity/pause fault was flagged by
			 * ql_handle_uncommon_risc_intr(); stop servicing,
			 * flushing the register write with a read.
			 */
			if (ha->flags & PARITY_ERROR) {
				EL(ha, "parity/pause exit\n");
				mbx = RD16_IO_REG(ha, hccr); /* PCI posting */
				break;
			}

			/* Only real progress resets the spurious counter. */
			if (clear_spurious) {
				spurious_intr = 0;
			}
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INTR_UNLOCK(ha);

	/* Wake the task daemon for any batched flag work. */
	if (daemon) {
		ql_awaken_task_daemon(ha, NULL, 0, 0);
	}

	/* Post completed commands back to the transport, lock-free. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first);
	}

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): interrupt not claimed\n",
		    ha->instance);
	}

	/* Drop the power-management busy reference taken on entry. */
	QL_PM_LOCK(ha);
	ha->busy--;
	QL_PM_UNLOCK(ha);

	return (rval);
}
550 
/*
 * ql_handle_uncommon_risc_intr
 *	Handle an uncommon RISC interrupt.
 *
 *	Checks for a RISC pause / internal parity error condition; if
 *	present, the firmware state is dumped, an ISP abort is requested
 *	and ISP interrupts are disabled.  Anything else is logged as an
 *	unknown interrupt.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	stat:		interrupt status
 *	set_flags:	task daemon flags to set (output).
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, uint32_t stat,
    uint32_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	/* Pause bit in the status, or parity error bits in HCCR? */
	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		/* PARITY_ERROR makes the ISR loop bail out early. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/*
		 * Warn only on the first occurrence or when the error
		 * signature changes, to avoid flooding the console.
		 */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/* Capture firmware state; reset the chip if the dump fails. */
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating error counter. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		CFG_IST(ha, CFG_CTRL_8021) ? ql_8021_disable_intrs(ha) :
		    WRT16_IO_REG(ha, ictrl, 0);
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~INTERRUPTS_ENABLED;
		ADAPTER_STATE_UNLOCK(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
615 
/*
 * ql_spurious_intr
 *	Inform Solaris of spurious interrupts.
 *
 *	Disables ISP interrupts, optionally clears the pending RISC
 *	interrupt, and reports the device as degraded via the DDI fault
 *	framework.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_spurious_intr(ql_adapter_state_t *ha, int intr_clr)
{
	ddi_devstate_t	state;

	EL(ha, "Spurious interrupt\n");

	/* Disable ISP interrupts. */
	WRT16_IO_REG(ha, ictrl, 0);
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	/* Clear RISC interrupt, using the register model for this chip. */
	if (intr_clr) {
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Report the fault only if the device is currently healthy. */
	state = ddi_get_devstate(ha->dip);
	if (state == DDI_DEVSTATE_UP) {
		/*EMPTY*/
		ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
		    DDI_DEVICE_FAULT, "spurious interrupts");
	}
}
658 
/*
 * ql_mbx_completion
 *	Processes mailbox completions.
 *
 *	Copies the outgoing mailbox registers requested by the pending
 *	mailbox command (ha->mcp->in_mb bitmap) into the command context,
 *	then wakes the waiter blocked in the mailbox issue path.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mb0:		Mailbox 0 contents.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *	intr_clr:	early interrupt clear
 *
 * Context:
 *	Interrupt context.
 */
/* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint32_t *set_flags,
    uint32_t *reset_flags, int intr_clr)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		ha->mcp->mb[0] = mb0;
		/* in_mb is a bitmap of mailboxes the caller wants back. */
		index = ha->mcp->in_mb & ~MBX_0;

		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox_out[cnt]);
			}
		}

	} else {
		/* Completion arrived with no mailbox command outstanding. */
		EL(ha, "mcp == NULL\n");
	}

	if (intr_clr) {
		/* Clear RISC interrupt. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		}
	}

	/* Flag completion and wake the thread waiting on the mailbox. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
722 
723 /*
724  * ql_async_event
725  *	Processes asynchronous events.
726  *
727  * Input:
728  *	ha:		adapter state pointer.
729  *	mbx:		Mailbox 0 register.
730  *	done_q:		head pointer to done queue.
731  *	set_flags:	task daemon flags to set.
732  *	reset_flags:	task daemon flags to reset.
733  *	intr_clr:	early interrupt clear
734  *
735  * Context:
736  *	Interrupt or Kernel context, no mailbox commands allowed.
737  */
738 static void
ql_async_event(ql_adapter_state_t * ha,uint32_t mbx,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags,int intr_clr)739 ql_async_event(ql_adapter_state_t *ha, uint32_t mbx, ql_head_t *done_q,
740     uint32_t *set_flags, uint32_t *reset_flags, int intr_clr)
741 {
742 	uint32_t		handle;
743 	uint32_t		index;
744 	uint16_t		cnt;
745 	uint16_t		mb[MAX_MBOX_COUNT];
746 	ql_srb_t		*sp;
747 	port_id_t		s_id;
748 	ql_tgt_t		*tq;
749 	boolean_t		intr = B_TRUE;
750 	ql_adapter_state_t	*vha;
751 
752 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
753 
754 	/* Setup to process fast completion. */
755 	mb[0] = LSW(mbx);
756 	switch (mb[0]) {
757 	case MBA_SCSI_COMPLETION:
758 		handle = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
759 		    RD16_IO_REG(ha, mailbox_out[2]));
760 		break;
761 
762 	case MBA_CMPLT_1_16BIT:
763 		handle = MSW(mbx);
764 		mb[0] = MBA_SCSI_COMPLETION;
765 		break;
766 
767 	case MBA_CMPLT_1_32BIT:
768 		handle = SHORT_TO_LONG(MSW(mbx),
769 		    RD16_IO_REG(ha, mailbox_out[2]));
770 		mb[0] = MBA_SCSI_COMPLETION;
771 		break;
772 
773 	case MBA_CTIO_COMPLETION:
774 	case MBA_IP_COMPLETION:
775 		handle = CFG_IST(ha, CFG_CTRL_2200) ? SHORT_TO_LONG(
776 		    RD16_IO_REG(ha, mailbox_out[1]),
777 		    RD16_IO_REG(ha, mailbox_out[2])) :
778 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
779 		mb[0] = MBA_SCSI_COMPLETION;
780 		break;
781 
782 	default:
783 		break;
784 	}
785 
786 	/* Handle asynchronous event */
787 	switch (mb[0]) {
788 	case MBA_SCSI_COMPLETION:
789 		QL_PRINT_5(CE_CONT, "(%d): Fast post completion\n",
790 		    ha->instance);
791 
792 		if (intr_clr) {
793 			/* Clear RISC interrupt */
794 			if (CFG_IST(ha, CFG_CTRL_8021)) {
795 				ql_8021_clr_fw_intr(ha);
796 			} else if (CFG_IST(ha, CFG_CTRL_242581)) {
797 				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
798 			} else {
799 				WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
800 			}
801 			intr = B_FALSE;
802 		}
803 
804 		if ((ha->flags & ONLINE) == 0) {
805 			break;
806 		}
807 
808 		/* Get handle. */
809 		index = handle & OSC_INDEX_MASK;
810 
811 		/* Validate handle. */
812 		sp = index < MAX_OUTSTANDING_COMMANDS ?
813 		    ha->outstanding_cmds[index] : NULL;
814 
815 		if (sp != NULL && sp->handle == handle) {
816 			ha->outstanding_cmds[index] = NULL;
817 			sp->handle = 0;
818 			sp->flags &= ~SRB_IN_TOKEN_ARRAY;
819 
820 			/* Set completed status. */
821 			sp->flags |= SRB_ISP_COMPLETED;
822 
823 			/* Set completion status */
824 			sp->pkt->pkt_reason = CS_COMPLETE;
825 
826 			if (!(sp->flags & SRB_FCP_CMD_PKT)) {
827 				/* Place block on done queue */
828 				ql_add_link_b(done_q, &sp->cmd);
829 			} else {
830 				ql_fast_fcp_post(sp);
831 			}
832 		} else if (handle != QL_FCA_BRAND) {
833 			if (sp == NULL) {
834 				EL(ha, "%xh unknown IOCB handle=%xh\n",
835 				    mb[0], handle);
836 			} else {
837 				EL(ha, "%xh mismatch IOCB handle pkt=%xh, "
838 				    "sp=%xh\n", mb[0], handle, sp->handle);
839 			}
840 
841 			EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, mbx3=%xh,"
842 			    "mbx6=%xh, mbx7=%xh\n", mb[0],
843 			    RD16_IO_REG(ha, mailbox_out[1]),
844 			    RD16_IO_REG(ha, mailbox_out[2]),
845 			    RD16_IO_REG(ha, mailbox_out[3]),
846 			    RD16_IO_REG(ha, mailbox_out[6]),
847 			    RD16_IO_REG(ha, mailbox_out[7]));
848 
849 			(void) ql_binary_fw_dump(ha, FALSE);
850 
851 			if (!(ha->task_daemon_flags &
852 			    (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
853 				EL(ha, "%xh ISP Invalid handle, "
854 				    "isp_abort_needed\n", mb[0]);
855 				*set_flags |= ISP_ABORT_NEEDED;
856 			}
857 		}
858 		break;
859 
860 	case MBA_RESET:		/* Reset */
861 		EL(ha, "%xh Reset received\n", mb[0]);
862 		*set_flags |= RESET_MARKER_NEEDED;
863 		break;
864 
865 	case MBA_SYSTEM_ERR:		/* System Error */
866 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
867 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
868 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
869 		mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
870 
871 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
872 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
873 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
874 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
875 		    RD16_IO_REG(ha, mailbox_out[4]),
876 		    RD16_IO_REG(ha, mailbox_out[5]),
877 		    RD16_IO_REG(ha, mailbox_out[6]), mb[7],
878 		    RD16_IO_REG(ha, mailbox_out[8]),
879 		    RD16_IO_REG(ha, mailbox_out[9]),
880 		    RD16_IO_REG(ha, mailbox_out[10]),
881 		    RD16_IO_REG(ha, mailbox_out[11]),
882 		    RD16_IO_REG(ha, mailbox_out[12]));
883 
884 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
885 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
886 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
887 		    mb[0], RD16_IO_REG(ha, mailbox_out[13]),
888 		    RD16_IO_REG(ha, mailbox_out[14]),
889 		    RD16_IO_REG(ha, mailbox_out[15]),
890 		    RD16_IO_REG(ha, mailbox_out[16]),
891 		    RD16_IO_REG(ha, mailbox_out[17]),
892 		    RD16_IO_REG(ha, mailbox_out[18]),
893 		    RD16_IO_REG(ha, mailbox_out[19]),
894 		    RD16_IO_REG(ha, mailbox_out[20]),
895 		    RD16_IO_REG(ha, mailbox_out[21]),
896 		    RD16_IO_REG(ha, mailbox_out[22]),
897 		    RD16_IO_REG(ha, mailbox_out[23]));
898 
899 		if (ha->reg_off->mbox_cnt > 24) {
900 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
901 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
902 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
903 			    RD16_IO_REG(ha, mailbox_out[24]),
904 			    RD16_IO_REG(ha, mailbox_out[25]),
905 			    RD16_IO_REG(ha, mailbox_out[26]),
906 			    RD16_IO_REG(ha, mailbox_out[27]),
907 			    RD16_IO_REG(ha, mailbox_out[28]),
908 			    RD16_IO_REG(ha, mailbox_out[29]),
909 			    RD16_IO_REG(ha, mailbox_out[30]),
910 			    RD16_IO_REG(ha, mailbox_out[31]));
911 		}
912 
913 		(void) ql_binary_fw_dump(ha, FALSE);
914 
915 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8002, mb[1],
916 		    mb[2], mb[3]);
917 
918 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
919 			ADAPTER_STATE_LOCK(ha);
920 			ha->flags |= MPI_RESET_NEEDED;
921 			ADAPTER_STATE_UNLOCK(ha);
922 		}
923 
924 		*set_flags |= ISP_ABORT_NEEDED;
925 		ha->xioctl->ControllerErrorCount++;
926 		break;
927 
928 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
929 		EL(ha, "%xh Request Transfer Error received, "
930 		    "isp_abort_needed\n", mb[0]);
931 
932 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8003,
933 		    RD16_IO_REG(ha, mailbox_out[1]),
934 		    RD16_IO_REG(ha, mailbox_out[2]),
935 		    RD16_IO_REG(ha, mailbox_out[3]));
936 
937 		*set_flags |= ISP_ABORT_NEEDED;
938 		ha->xioctl->ControllerErrorCount++;
939 		break;
940 
941 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
942 		EL(ha, "%xh Response Transfer Error received,"
943 		    " isp_abort_needed\n", mb[0]);
944 
945 		(void) ql_flash_errlog(ha, FLASH_ERRLOG_AEN_8004,
946 		    RD16_IO_REG(ha, mailbox_out[1]),
947 		    RD16_IO_REG(ha, mailbox_out[2]),
948 		    RD16_IO_REG(ha, mailbox_out[3]));
949 
950 		*set_flags |= ISP_ABORT_NEEDED;
951 		ha->xioctl->ControllerErrorCount++;
952 		break;
953 
954 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
955 		EL(ha, "%xh Request Queue Wake-up received\n",
956 		    mb[0]);
957 		break;
958 
959 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
960 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
961 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
962 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
963 
964 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
965 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
966 
967 		switch (mb[1]) {
968 		case MLA_LOGIN_OPERATIONAL_FW:
969 			ADAPTER_STATE_LOCK(ha);
970 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
971 			ADAPTER_STATE_UNLOCK(ha);
972 			break;
973 		case MLA_PANIC_RECOVERY:
974 		case MLA_LOGIN_DIAGNOSTIC_FW:
975 		case MLA_LOGIN_GOLDEN_FW:
976 		case MLA_REJECT_RESPONSE:
977 		default:
978 			break;
979 		}
980 		break;
981 
982 	case MBA_LIP_F8:	/* Received a LIP F8. */
983 	case MBA_LIP_RESET:	/* LIP reset occurred. */
984 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
985 		if (CFG_IST(ha, CFG_CTRL_8081)) {
986 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
987 			    "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
988 			    RD16_IO_REG(ha, mailbox_out[2]));
989 		} else {
990 			EL(ha, "%xh LIP received\n", mb[0]);
991 		}
992 
993 		ADAPTER_STATE_LOCK(ha);
994 		ha->flags &= ~POINT_TO_POINT;
995 		ADAPTER_STATE_UNLOCK(ha);
996 
997 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
998 			*set_flags |= LOOP_DOWN;
999 		}
1000 		ql_port_state(ha, FC_STATE_OFFLINE,
1001 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1002 
1003 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1004 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1005 		}
1006 
1007 		ha->adapter_stats->lip_count++;
1008 
1009 		/* Update AEN queue. */
1010 		ha->xioctl->TotalLipResets++;
1011 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1012 			ql_enqueue_aen(ha, mb[0], NULL);
1013 		}
1014 		break;
1015 
1016 	case MBA_LOOP_UP:
1017 		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322 |
1018 		    CFG_CTRL_24258081))) {
1019 			ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1020 			if (ha->iidma_rate == IIDMA_RATE_1GB) {
1021 				ha->state = FC_PORT_STATE_MASK(
1022 				    ha->state) | FC_STATE_1GBIT_SPEED;
1023 				index = 1;
1024 			} else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1025 				ha->state = FC_PORT_STATE_MASK(
1026 				    ha->state) | FC_STATE_2GBIT_SPEED;
1027 				index = 2;
1028 			} else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1029 				ha->state = FC_PORT_STATE_MASK(
1030 				    ha->state) | FC_STATE_4GBIT_SPEED;
1031 				index = 4;
1032 			} else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1033 				ha->state = FC_PORT_STATE_MASK(
1034 				    ha->state) | FC_STATE_8GBIT_SPEED;
1035 				index = 8;
1036 			} else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1037 				ha->state = FC_PORT_STATE_MASK(
1038 				    ha->state) | FC_STATE_10GBIT_SPEED;
1039 				index = 10;
1040 			} else {
1041 				ha->state = FC_PORT_STATE_MASK(
1042 				    ha->state);
1043 				index = 0;
1044 			}
1045 		} else {
1046 			ha->iidma_rate = IIDMA_RATE_1GB;
1047 			ha->state = FC_PORT_STATE_MASK(ha->state) |
1048 			    FC_STATE_FULL_SPEED;
1049 			index = 1;
1050 		}
1051 
1052 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1053 			vha->state = FC_PORT_STATE_MASK(vha->state) |
1054 			    FC_PORT_SPEED_MASK(ha->state);
1055 		}
1056 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1057 
1058 		/* Update AEN queue. */
1059 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1060 			ql_enqueue_aen(ha, mb[0], NULL);
1061 		}
1062 		break;
1063 
1064 	case MBA_LOOP_DOWN:
1065 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1066 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1067 		    RD16_IO_REG(ha, mailbox_out[2]),
1068 		    RD16_IO_REG(ha, mailbox_out[3]),
1069 		    RD16_IO_REG(ha, mailbox_out[4]));
1070 
1071 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1072 			*set_flags |= LOOP_DOWN;
1073 		}
1074 		ql_port_state(ha, FC_STATE_OFFLINE,
1075 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1076 
1077 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1078 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1079 		}
1080 
1081 		if (CFG_IST(ha, CFG_CTRL_258081)) {
1082 			ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1083 		}
1084 
1085 		/* Update AEN queue. */
1086 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1087 			ql_enqueue_aen(ha, mb[0], NULL);
1088 		}
1089 		break;
1090 
1091 	case MBA_PORT_UPDATE:
1092 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1093 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1094 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1095 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1096 
1097 		/* Locate port state structure. */
1098 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1099 			if (vha->vp_index == LSB(mb[3])) {
1100 				break;
1101 			}
1102 		}
1103 		if (vha == NULL) {
1104 			break;
1105 		}
1106 
1107 		if (CFG_IST(ha, CFG_CTRL_8081) && mb[1] == 0xffff &&
1108 		    mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1109 		    MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1110 		    MSB(mb[3]) == 0x1e)) {
1111 			/*
1112 			 * received FLOGI reject
1113 			 * received FLOGO
1114 			 * FCF configuration changed
1115 			 * FIP Clear Virtual Link received
1116 			 * FKA timeout
1117 			 */
1118 			if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1119 				*set_flags |= LOOP_DOWN;
1120 			}
1121 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1122 			    COMMAND_WAIT_NEEDED | LOOP_DOWN);
1123 			if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1124 				ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1125 			}
1126 		/*
1127 		 * In N port 2 N port topology the FW provides a port
1128 		 * database entry at loop_id 0x7fe which we use to
1129 		 * acquire the Ports WWPN.
1130 		 */
1131 		} else if ((mb[1] != 0x7fe) &&
1132 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1133 		    (CFG_IST(ha, CFG_CTRL_24258081) &&
1134 		    (mb[1] != 0xffff || mb[2] != 6 || mb[3] != 0))))) {
1135 			EL(ha, "%xh Port Database Update, Login/Logout "
1136 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1137 			    mb[0], mb[1], mb[2], mb[3]);
1138 		} else {
1139 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1140 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1141 			    mb[3]);
1142 			*set_flags |= LOOP_RESYNC_NEEDED;
1143 			*set_flags &= ~LOOP_DOWN;
1144 			*reset_flags |= LOOP_DOWN;
1145 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1146 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1147 			TASK_DAEMON_LOCK(ha);
1148 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1149 			vha->task_daemon_flags &= ~LOOP_DOWN;
1150 			TASK_DAEMON_UNLOCK(ha);
1151 			ADAPTER_STATE_LOCK(ha);
1152 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1153 			ADAPTER_STATE_UNLOCK(ha);
1154 		}
1155 
1156 		/* Update AEN queue. */
1157 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1158 			ql_enqueue_aen(ha, mb[0], NULL);
1159 		}
1160 		break;
1161 
1162 	case MBA_RSCN_UPDATE:
1163 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1164 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1165 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1166 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1167 
1168 		/* Locate port state structure. */
1169 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1170 			if (vha->vp_index == LSB(mb[3])) {
1171 				break;
1172 			}
1173 		}
1174 
1175 		if (vha == NULL) {
1176 			break;
1177 		}
1178 
1179 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1180 		    MSB(mb[2]) == vha->d_id.b.area &&
1181 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1182 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1183 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1184 		} else {
1185 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1186 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1187 			if (FC_PORT_STATE_MASK(vha->state) !=
1188 			    FC_STATE_OFFLINE) {
1189 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1190 				TASK_DAEMON_LOCK(ha);
1191 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1192 				TASK_DAEMON_UNLOCK(ha);
1193 				*set_flags |= RSCN_UPDATE_NEEDED;
1194 			}
1195 		}
1196 
1197 		/* Update AEN queue. */
1198 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1199 			ql_enqueue_aen(ha, mb[0], NULL);
1200 		}
1201 		break;
1202 
1203 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1204 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1205 		    RD16_IO_REG(ha, mailbox_out[1]));
1206 		break;
1207 
1208 	case MBA_IP_RECEIVE:
1209 	case MBA_IP_BROADCAST:
1210 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1211 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1212 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1213 
1214 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1215 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1216 
1217 		/* Locate device queue. */
1218 		s_id.b.al_pa = LSB(mb[2]);
1219 		s_id.b.area = MSB(mb[2]);
1220 		s_id.b.domain = LSB(mb[1]);
1221 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1222 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1223 			break;
1224 		}
1225 
1226 		cnt = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1227 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1228 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1229 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1230 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1231 
1232 		tq->ub_sequence_length = mb[3];
1233 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1234 		if (mb[3] % cnt) {
1235 			tq->ub_total_seg_cnt++;
1236 		}
1237 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1238 
1239 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1240 		    index++) {
1241 			mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1242 		}
1243 
1244 		tq->ub_seq_id = ++ha->ub_seq_id;
1245 		tq->ub_seq_cnt = 0;
1246 		tq->ub_frame_ro = 0;
1247 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1248 		    (CFG_IST(ha, CFG_CTRL_24258081) ? BROADCAST_24XX_HDL :
1249 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1250 		ha->rcv_dev_q = tq;
1251 
1252 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1253 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1254 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1255 			    QL_SUCCESS) {
1256 				EL(ha, "ql_ub_frame_hdr failed, "
1257 				    "isp_abort_needed\n");
1258 				*set_flags |= ISP_ABORT_NEEDED;
1259 				break;
1260 			}
1261 		}
1262 		break;
1263 
1264 	case MBA_IP_LOW_WATER_MARK:
1265 	case MBA_IP_RCV_BUFFER_EMPTY:
1266 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1267 		    mb[0]);
1268 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1269 		break;
1270 
1271 	case MBA_IP_HDR_DATA_SPLIT:
1272 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1273 		break;
1274 
1275 	case MBA_ERROR_LOGGING_DISABLED:
1276 		EL(ha, "%xh error logging disabled received, "
1277 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1278 		break;
1279 
1280 	case MBA_POINT_TO_POINT:
1281 	/* case MBA_DCBX_COMPLETED: */
1282 		if (CFG_IST(ha, CFG_CTRL_8081)) {
1283 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1284 		} else {
1285 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1286 		}
1287 		ADAPTER_STATE_LOCK(ha);
1288 		ha->flags |= POINT_TO_POINT;
1289 		ADAPTER_STATE_UNLOCK(ha);
1290 		break;
1291 
1292 	case MBA_FCF_CONFIG_ERROR:
1293 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1294 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1295 		break;
1296 
1297 	case MBA_DCBX_PARAM_CHANGED:
1298 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1299 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1300 		break;
1301 
1302 	case MBA_CHG_IN_CONNECTION:
1303 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1304 		if (mb[1] == 2) {
1305 			EL(ha, "%xh Change In Connection received, "
1306 			    "mbx1=%xh\n",  mb[0], mb[1]);
1307 			ADAPTER_STATE_LOCK(ha);
1308 			ha->flags &= ~POINT_TO_POINT;
1309 			ADAPTER_STATE_UNLOCK(ha);
1310 			if (ha->topology & QL_N_PORT) {
1311 				ha->topology = (uint8_t)(ha->topology &
1312 				    ~QL_N_PORT);
1313 				ha->topology = (uint8_t)(ha->topology |
1314 				    QL_NL_PORT);
1315 			}
1316 		} else {
1317 			EL(ha, "%xh Change In Connection received, "
1318 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1319 			*set_flags |= ISP_ABORT_NEEDED;
1320 		}
1321 		break;
1322 
1323 	case MBA_ZIO_UPDATE:
1324 		EL(ha, "%xh ZIO response received\n", mb[0]);
1325 
1326 		ha->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1327 		ql_response_pkt(ha, done_q, set_flags, reset_flags, intr_clr);
1328 		intr = B_FALSE;
1329 		break;
1330 
1331 	case MBA_PORT_BYPASS_CHANGED:
1332 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1333 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1334 		/*
1335 		 * Event generated when there is a transition on
1336 		 * port bypass of crystal+.
1337 		 * Mailbox 1:	Bit 0 - External.
1338 		 *		Bit 2 - Internal.
1339 		 * When the bit is 0, the port is bypassed.
1340 		 *
1341 		 * For now we will generate a LIP for all cases.
1342 		 */
1343 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1344 		break;
1345 
1346 	case MBA_RECEIVE_ERROR:
1347 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1348 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1349 		    RD16_IO_REG(ha, mailbox_out[2]));
1350 		break;
1351 
1352 	case MBA_LS_RJT_SENT:
1353 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1354 		    RD16_IO_REG(ha, mailbox_out[1]));
1355 		break;
1356 
1357 	case MBA_FW_RESTART_COMP:
1358 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1359 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1360 		break;
1361 
1362 	/*
1363 	 * MBA_IDC_COMPLETE &  MBA_IDC_NOTIFICATION: We won't get another
1364 	 * IDC async event until we ACK the current one.
1365 	 */
1366 	case MBA_IDC_COMPLETE:
1367 		ha->idc_mb[0] = mb[0];
1368 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1369 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1370 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1371 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1372 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1373 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1374 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1375 		EL(ha, "%xh Inter-driver communication complete received, "
1376 		    " mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh,"
1377 		    " mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1378 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1379 		    ha->idc_mb[6], ha->idc_mb[7]);
1380 		*set_flags |= IDC_EVENT;
1381 		break;
1382 
1383 	case MBA_IDC_NOTIFICATION:
1384 		ha->idc_mb[0] = mb[0];
1385 		ha->idc_mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1386 		ha->idc_mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1387 		ha->idc_mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1388 		ha->idc_mb[4] = RD16_IO_REG(ha, mailbox_out[4]);
1389 		ha->idc_mb[5] = RD16_IO_REG(ha, mailbox_out[5]);
1390 		ha->idc_mb[6] = RD16_IO_REG(ha, mailbox_out[6]);
1391 		ha->idc_mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
1392 		EL(ha, "%xh Inter-driver communication request notification "
1393 		    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh, mbx4=%xh, "
1394 		    "mbx5=%xh, mbx6=%xh, mbx7=%xh\n", mb[0], ha->idc_mb[1],
1395 		    ha->idc_mb[2], ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5],
1396 		    ha->idc_mb[6], ha->idc_mb[7]);
1397 		*set_flags |= IDC_EVENT;
1398 		break;
1399 
1400 	case MBA_IDC_TIME_EXTENDED:
1401 		EL(ha, "%xh Inter-driver communication time extended received,"
1402 		    " mbx1=%xh, mbx2=%xh\n", mb[0],
1403 		    RD16_IO_REG(ha, mailbox_out[1]),
1404 		    RD16_IO_REG(ha, mailbox_out[2]));
1405 		break;
1406 
1407 	default:
1408 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1409 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1410 		    RD16_IO_REG(ha, mailbox_out[2]),
1411 		    RD16_IO_REG(ha, mailbox_out[3]));
1412 		break;
1413 	}
1414 
1415 	/* Clear RISC interrupt */
1416 	if (intr && intr_clr) {
1417 		if (CFG_IST(ha, CFG_CTRL_8021)) {
1418 			ql_8021_clr_fw_intr(ha);
1419 		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
1420 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1421 		} else {
1422 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1423 		}
1424 	}
1425 
1426 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1427 }
1428 
/*
 * ql_fast_fcp_post
 *	Fast path for good SCSI I/O completion.
 *
 *	Retires a successfully completed FCP command without going
 *	through the full error-processing path: decrements outstanding
 *	command counts, resets retry counters, removes the watchdog
 *	entry, restarts any queued I/O and invokes the packet
 *	completion callback.
 *
 * Input:
 *	sp:	SRB pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_fast_fcp_post(ql_srb_t *sp)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;
	ha->pha->timeout_cnt = 0;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	if (lq->cmd.first != NULL) {
		/*
		 * More commands are queued on this LUN; start the next one.
		 * NOTE(review): there is no explicit unlock on this path, so
		 * ql_next() presumably takes over and releases the device
		 * queue lock - confirm against ql_next()'s definition.
		 */
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required. */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	/* Now call the pkt completion callback */
	if (sp->flags & SRB_POLL) {
		/* Caller is polling and will reap the completion itself. */
		sp->flags &= ~SRB_POLL;
	} else if (sp->pkt->pkt_comp) {
		/* Drop the interrupt lock across the upcall. */
		INTR_UNLOCK(ha);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INTR_LOCK(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
1514 
1515 /*
1516  * ql_response_pkt
1517  *	Processes response entry.
1518  *
1519  * Input:
1520  *	ha:		adapter state pointer.
1521  *	done_q:		head pointer to done queue.
1522  *	set_flags:	task daemon flags to set.
1523  *	reset_flags:	task daemon flags to reset.
1524  *	intr_clr:	early interrupt clear
1525  *
1526  * Context:
1527  *	Interrupt or Kernel context, no mailbox commands allowed.
1528  */
1529 static void
ql_response_pkt(ql_adapter_state_t * ha,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags,int intr_clr)1530 ql_response_pkt(ql_adapter_state_t *ha, ql_head_t *done_q, uint32_t *set_flags,
1531     uint32_t *reset_flags, int intr_clr)
1532 {
1533 	response_t	*pkt;
1534 	uint32_t	dma_sync_size_1 = 0;
1535 	uint32_t	dma_sync_size_2 = 0;
1536 	int		status = 0;
1537 
1538 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1539 
1540 	/* Clear RISC interrupt */
1541 	if (intr_clr) {
1542 		if (CFG_IST(ha, CFG_CTRL_8021)) {
1543 			ql_8021_clr_fw_intr(ha);
1544 		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
1545 			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
1546 		} else {
1547 			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
1548 		}
1549 	}
1550 
1551 	if (ha->isp_rsp_index >= RESPONSE_ENTRY_CNT) {
1552 		EL(ha, "index error = %xh, isp_abort_needed",
1553 		    ha->isp_rsp_index);
1554 		*set_flags |= ISP_ABORT_NEEDED;
1555 		return;
1556 	}
1557 
1558 	if ((ha->flags & ONLINE) == 0) {
1559 		QL_PRINT_3(CE_CONT, "(%d): not onlne, done\n", ha->instance);
1560 		return;
1561 	}
1562 
1563 	/* Calculate size of response queue entries to sync. */
1564 	if (ha->isp_rsp_index > ha->rsp_ring_index) {
1565 		dma_sync_size_1 = (uint32_t)
1566 		    ((uint32_t)(ha->isp_rsp_index - ha->rsp_ring_index) *
1567 		    RESPONSE_ENTRY_SIZE);
1568 	} else if (ha->isp_rsp_index == 0) {
1569 		dma_sync_size_1 = (uint32_t)
1570 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1571 		    RESPONSE_ENTRY_SIZE);
1572 	} else {
1573 		/* Responses wrap around the Q */
1574 		dma_sync_size_1 = (uint32_t)
1575 		    ((uint32_t)(RESPONSE_ENTRY_CNT - ha->rsp_ring_index) *
1576 		    RESPONSE_ENTRY_SIZE);
1577 		dma_sync_size_2 = (uint32_t)
1578 		    (ha->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1579 	}
1580 
1581 	/* Sync DMA buffer. */
1582 	(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1583 	    (off_t)(ha->rsp_ring_index * RESPONSE_ENTRY_SIZE +
1584 	    RESPONSE_Q_BUFFER_OFFSET), dma_sync_size_1,
1585 	    DDI_DMA_SYNC_FORKERNEL);
1586 	if (dma_sync_size_2) {
1587 		(void) ddi_dma_sync(ha->hba_buf.dma_handle,
1588 		    RESPONSE_Q_BUFFER_OFFSET, dma_sync_size_2,
1589 		    DDI_DMA_SYNC_FORKERNEL);
1590 	}
1591 
1592 	while (ha->rsp_ring_index != ha->isp_rsp_index) {
1593 		pkt = ha->response_ring_ptr;
1594 
1595 		QL_PRINT_5(CE_CONT, "(%d): ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1596 		    ha->instance, ha->rsp_ring_index, ha->isp_rsp_index);
1597 		QL_DUMP_5((uint8_t *)ha->response_ring_ptr, 8,
1598 		    RESPONSE_ENTRY_SIZE);
1599 
1600 		/* Adjust ring index. */
1601 		ha->rsp_ring_index++;
1602 		if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
1603 			ha->rsp_ring_index = 0;
1604 			ha->response_ring_ptr = ha->response_ring_bp;
1605 		} else {
1606 			ha->response_ring_ptr++;
1607 		}
1608 
1609 		/* Process packet. */
1610 		if (ha->status_srb != NULL && pkt->entry_type !=
1611 		    STATUS_CONT_TYPE) {
1612 			ql_add_link_b(done_q, &ha->status_srb->cmd);
1613 			ha->status_srb = NULL;
1614 		}
1615 
1616 		pkt->entry_status = (uint8_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
1617 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1618 
1619 		if (pkt->entry_status != 0) {
1620 			ql_error_entry(ha, pkt, done_q, set_flags,
1621 			    reset_flags);
1622 		} else {
1623 			switch (pkt->entry_type) {
1624 			case STATUS_TYPE:
1625 				status |= CFG_IST(ha, CFG_CTRL_24258081) ?
1626 				    ql_24xx_status_entry(ha,
1627 				    (sts_24xx_entry_t *)pkt, done_q, set_flags,
1628 				    reset_flags) :
1629 				    ql_status_entry(ha, (sts_entry_t *)pkt,
1630 				    done_q, set_flags, reset_flags);
1631 				break;
1632 			case STATUS_CONT_TYPE:
1633 				ql_status_cont_entry(ha,
1634 				    (sts_cont_entry_t *)pkt, done_q, set_flags,
1635 				    reset_flags);
1636 				break;
1637 			case IP_TYPE:
1638 			case IP_A64_TYPE:
1639 			case IP_CMD_TYPE:
1640 				ql_ip_entry(ha, (ip_entry_t *)pkt, done_q,
1641 				    set_flags, reset_flags);
1642 				break;
1643 			case IP_RECEIVE_TYPE:
1644 				ql_ip_rcv_entry(ha,
1645 				    (ip_rcv_entry_t *)pkt, done_q, set_flags,
1646 				    reset_flags);
1647 				break;
1648 			case IP_RECEIVE_CONT_TYPE:
1649 				ql_ip_rcv_cont_entry(ha,
1650 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1651 				    set_flags, reset_flags);
1652 				break;
1653 			case IP_24XX_RECEIVE_TYPE:
1654 				ql_ip_24xx_rcv_entry(ha,
1655 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1656 				    set_flags, reset_flags);
1657 				break;
1658 			case MS_TYPE:
1659 				ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1660 				    set_flags, reset_flags);
1661 				break;
1662 			case REPORT_ID_TYPE:
1663 				ql_report_id_entry(ha, (report_id_1_t *)pkt,
1664 				    done_q, set_flags, reset_flags);
1665 				break;
1666 			case ELS_PASSTHRU_TYPE:
1667 				ql_els_passthru_entry(ha,
1668 				    (els_passthru_entry_rsp_t *)pkt,
1669 				    done_q, set_flags, reset_flags);
1670 				break;
1671 			case IP_BUF_POOL_TYPE:
1672 			case MARKER_TYPE:
1673 			case VP_MODIFY_TYPE:
1674 			case VP_CONTROL_TYPE:
1675 				break;
1676 			default:
1677 				EL(ha, "Unknown IOCB entry type=%xh\n",
1678 				    pkt->entry_type);
1679 				break;
1680 			}
1681 		}
1682 	}
1683 
1684 	/* Inform RISC of processed responses. */
1685 	WRT16_IO_REG(ha, resp_out, ha->rsp_ring_index);
1686 
1687 	/* RESET packet received delay for possible async event. */
1688 	if (status & BIT_0) {
1689 		drv_usecwait(500000);
1690 	}
1691 
1692 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1693 }
1694 
1695 /*
1696  * ql_error_entry
1697  *	Processes error entry.
1698  *
1699  * Input:
1700  *	ha = adapter state pointer.
1701  *	pkt = entry pointer.
1702  *	done_q = head pointer to done queue.
1703  *	set_flags = task daemon flags to set.
1704  *	reset_flags = task daemon flags to reset.
1705  *
1706  * Context:
1707  *	Interrupt or Kernel context, no mailbox commands allowed.
1708  */
1709 /* ARGSUSED */
1710 static void
ql_error_entry(ql_adapter_state_t * ha,response_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)1711 ql_error_entry(ql_adapter_state_t *ha, response_t *pkt, ql_head_t *done_q,
1712     uint32_t *set_flags, uint32_t *reset_flags)
1713 {
1714 	ql_srb_t	*sp;
1715 	uint32_t	index, resp_identifier;
1716 
1717 	if (pkt->entry_type == INVALID_ENTRY_TYPE) {
1718 		EL(ha, "Aborted command\n");
1719 		return;
1720 	}
1721 
1722 	QL_PRINT_2(CE_CONT, "(%d): started, packet:\n", ha->instance);
1723 	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);
1724 
1725 	if (pkt->entry_status & BIT_6) {
1726 		EL(ha, "Request Queue DMA error\n");
1727 	} else if (pkt->entry_status & BIT_5) {
1728 		EL(ha, "Invalid Entry Order\n");
1729 	} else if (pkt->entry_status & BIT_4) {
1730 		EL(ha, "Invalid Entry Count\n");
1731 	} else if (pkt->entry_status & BIT_3) {
1732 		EL(ha, "Invalid Entry Parameter\n");
1733 	} else if (pkt->entry_status & BIT_2) {
1734 		EL(ha, "Invalid Entry Type\n");
1735 	} else if (pkt->entry_status & BIT_1) {
1736 		EL(ha, "Busy\n");
1737 	} else {
1738 		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
1739 	}
1740 
1741 	/* Validate the response entry handle. */
1742 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1743 	index = resp_identifier & OSC_INDEX_MASK;
1744 	if (index < MAX_OUTSTANDING_COMMANDS) {
1745 		/* the index seems reasonable */
1746 		sp = ha->outstanding_cmds[index];
1747 		if (sp != NULL) {
1748 			if (sp->handle == resp_identifier) {
1749 				/* Neo, you're the one... */
1750 				ha->outstanding_cmds[index] = NULL;
1751 				sp->handle = 0;
1752 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1753 			} else {
1754 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1755 				    resp_identifier, sp->handle);
1756 				sp = NULL;
1757 				ql_signal_abort(ha, set_flags);
1758 			}
1759 		} else {
1760 			sp = ql_verify_preprocessed_cmd(ha,
1761 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1762 		}
1763 	} else {
1764 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1765 		    index, resp_identifier);
1766 		ql_signal_abort(ha, set_flags);
1767 	}
1768 
1769 	if (sp != NULL) {
1770 		/* Bad payload or header */
1771 		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
1772 			/* Bad payload or header, set error status. */
1773 			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
1774 		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
1775 			sp->pkt->pkt_reason = CS_QUEUE_FULL;
1776 		} else {
1777 			/* Set error status. */
1778 			sp->pkt->pkt_reason = CS_UNKNOWN;
1779 		}
1780 
1781 		/* Set completed status. */
1782 		sp->flags |= SRB_ISP_COMPLETED;
1783 
1784 		/* Place command on done queue. */
1785 		ql_add_link_b(done_q, &sp->cmd);
1786 
1787 	}
1788 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1789 }
1790 
1791 /*
1792  * ql_status_entry
1793  *	Processes received ISP2200-2300 status entry.
1794  *
1795  * Input:
1796  *	ha:		adapter state pointer.
1797  *	pkt:		entry pointer.
1798  *	done_q:		done queue pointer.
1799  *	set_flags:	task daemon flags to set.
1800  *	reset_flags:	task daemon flags to reset.
1801  *
1802  * Returns:
1803  *	BIT_0 = CS_RESET status received.
1804  *
1805  * Context:
1806  *	Interrupt or Kernel context, no mailbox commands allowed.
1807  */
1808 /* ARGSUSED */
1809 static int
ql_status_entry(ql_adapter_state_t * ha,sts_entry_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)1810 ql_status_entry(ql_adapter_state_t *ha, sts_entry_t *pkt,
1811     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
1812 {
1813 	ql_srb_t		*sp;
1814 	uint32_t		index, resp_identifier;
1815 	uint16_t		comp_status;
1816 	int			rval = 0;
1817 
1818 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
1819 
1820 	/* Validate the response entry handle. */
1821 	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
1822 	index = resp_identifier & OSC_INDEX_MASK;
1823 	if (index < MAX_OUTSTANDING_COMMANDS) {
1824 		/* the index seems reasonable */
1825 		sp = ha->outstanding_cmds[index];
1826 		if (sp != NULL) {
1827 			if (sp->handle == resp_identifier) {
1828 				/* Neo, you're the one... */
1829 				ha->outstanding_cmds[index] = NULL;
1830 				sp->handle = 0;
1831 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
1832 			} else {
1833 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
1834 				    resp_identifier, sp->handle);
1835 				sp = NULL;
1836 				ql_signal_abort(ha, set_flags);
1837 			}
1838 		} else {
1839 			sp = ql_verify_preprocessed_cmd(ha,
1840 			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
1841 		}
1842 	} else {
1843 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
1844 		    index, resp_identifier);
1845 		ql_signal_abort(ha, set_flags);
1846 	}
1847 
1848 	if (sp != NULL) {
1849 		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
1850 		    &pkt->comp_status);
1851 
1852 		/*
1853 		 * We dont care about SCSI QFULLs.
1854 		 */
1855 		if (comp_status == CS_QUEUE_FULL) {
1856 			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
1857 			    sp->lun_queue->target_queue->d_id.b24,
1858 			    sp->lun_queue->lun_no);
1859 			comp_status = CS_COMPLETE;
1860 		}
1861 
1862 		/*
1863 		 * 2300 firmware marks completion status as data underrun
1864 		 * for scsi qfulls. Make it transport complete.
1865 		 */
1866 		if ((CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) &&
1867 		    (comp_status == CS_DATA_UNDERRUN) &&
1868 		    (pkt->scsi_status_l != 0)) {
1869 			comp_status = CS_COMPLETE;
1870 		}
1871 
1872 		/*
1873 		 * Workaround T3 issue where we do not get any data xferred
1874 		 * but get back a good status.
1875 		 */
1876 		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
1877 		    comp_status == CS_COMPLETE &&
1878 		    pkt->scsi_status_l == 0 &&
1879 		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
1880 		    pkt->residual_length == 0 &&
1881 		    sp->fcp &&
1882 		    sp->fcp->fcp_data_len != 0 &&
1883 		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
1884 		    SF_DATA_OUT) {
1885 			comp_status = CS_ABORTED;
1886 		}
1887 
1888 		if (sp->flags & SRB_MS_PKT) {
1889 			/*
1890 			 * Ideally it should never be true. But there
1891 			 * is a bug in FW which upon receiving invalid
1892 			 * parameters in MS IOCB returns it as
1893 			 * status entry and not as ms entry type.
1894 			 */
1895 			ql_ms_entry(ha, (ms_entry_t *)pkt, done_q,
1896 			    set_flags, reset_flags);
1897 			QL_PRINT_3(CE_CONT, "(%d): ql_ms_entry done\n",
1898 			    ha->instance);
1899 			return (0);
1900 		}
1901 
1902 		/*
1903 		 * Fast path to good SCSI I/O completion
1904 		 */
1905 		if ((comp_status == CS_COMPLETE) &
1906 		    (!pkt->scsi_status_l) &
1907 		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
1908 			/* Set completed status. */
1909 			sp->flags |= SRB_ISP_COMPLETED;
1910 			sp->pkt->pkt_reason = comp_status;
1911 			ql_fast_fcp_post(sp);
1912 			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
1913 			    ha->instance);
1914 			return (0);
1915 		}
1916 		rval = ql_status_error(ha, sp, pkt, done_q, set_flags,
1917 		    reset_flags);
1918 	}
1919 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
1920 
1921 	return (rval);
1922 }
1923 
1924 /*
1925  * ql_24xx_status_entry
1926  *	Processes received ISP24xx status entry.
1927  *
1928  * Input:
1929  *	ha:		adapter state pointer.
1930  *	pkt:		entry pointer.
1931  *	done_q:		done queue pointer.
1932  *	set_flags:	task daemon flags to set.
1933  *	reset_flags:	task daemon flags to reset.
1934  *
1935  * Returns:
1936  *	BIT_0 = CS_RESET status received.
1937  *
1938  * Context:
1939  *	Interrupt or Kernel context, no mailbox commands allowed.
1940  */
1941 /* ARGSUSED */
1942 static int
ql_24xx_status_entry(ql_adapter_state_t *ha, sts_24xx_entry_t *pkt,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t		*sp = NULL;
	uint16_t		comp_status;
	uint32_t		index, resp_identifier;
	int			rval = 0;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < MAX_OUTSTANDING_COMMANDS) {
		/* the index seems reasonable */
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		} else {
			/*
			 * Slot is empty; the command may not have been
			 * fully preprocessed yet. Poll for it.
			 */
			sp = ql_verify_preprocessed_cmd(ha,
			    (uint32_t *)&pkt->handle, set_flags, reset_flags);
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt->comp_status);

		/* We dont care about SCSI QFULLs. */
		if (comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * 2300 firmware marks completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 */
		if ((comp_status == CS_DATA_UNDERRUN) &&
		    (pkt->scsi_status_l != 0)) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == 0 &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length != 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    sp->fcp->fcp_cntl.cntl_write_data) {
			comp_status = CS_ABORTED;
		}

		/*
		 * Fast path to good SCSI I/O completion.
		 *
		 * NOTE: this condition previously used bitwise '&' on the
		 * boolean sub-expressions; that happened to work because
		 * each operand evaluates to 0 or 1, but logical '&&' is
		 * the intended (and short-circuiting) operator.
		 */
		if ((comp_status == CS_COMPLETE) &&
		    (!pkt->scsi_status_l) &&
		    (!(pkt->scsi_status_h & FCP_RSP_MASK))) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp);
			QL_PRINT_3(CE_CONT, "(%d): ql_fast_fcp_post done\n",
			    ha->instance);
			return (0);
		}
		/* Slow path: error / sense processing. */
		rval = ql_status_error(ha, sp, (sts_entry_t *)pkt, done_q,
		    set_flags, reset_flags);
	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2037 
2038 /*
2039  * ql_verify_preprocessed_cmd
2040  *	Handles preprocessed cmds..
2041  *
2042  * Input:
2043  *	ha:		adapter state pointer.
2044  *	pkt_handle:	handle pointer.
2045  *	set_flags:	task daemon flags to set.
2046  *	reset_flags:	task daemon flags to reset.
2047  *
2048  * Returns:
2049  *	srb pointer or NULL
2050  *
2051  * Context:
2052  *	Interrupt or Kernel context, no mailbox commands allowed.
2053  */
2054 /* ARGSUSED */
2055 ql_srb_t *
ql_verify_preprocessed_cmd(ql_adapter_state_t * ha,uint32_t * pkt_handle,uint32_t * set_flags,uint32_t * reset_flags)2056 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, uint32_t *pkt_handle,
2057     uint32_t *set_flags, uint32_t *reset_flags)
2058 {
2059 	ql_srb_t		*sp = NULL;
2060 	uint32_t		index, resp_identifier;
2061 	uint32_t		get_handle = 10;
2062 
2063 	while (get_handle) {
2064 		/* Get handle. */
2065 		resp_identifier = ddi_get32(ha->hba_buf.acc_handle, pkt_handle);
2066 		index = resp_identifier & OSC_INDEX_MASK;
2067 		/* Validate handle. */
2068 		if (index < MAX_OUTSTANDING_COMMANDS) {
2069 			sp = ha->outstanding_cmds[index];
2070 		}
2071 
2072 		if (sp != NULL) {
2073 			EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2074 			    resp_identifier, get_handle, index);
2075 			break;
2076 		} else {
2077 			get_handle -= 1;
2078 			drv_usecwait(10000);
2079 			if (get_handle == 1) {
2080 				/* Last chance, Sync whole DMA buffer. */
2081 				(void) ddi_dma_sync(ha->hba_buf.dma_handle,
2082 				    RESPONSE_Q_BUFFER_OFFSET,
2083 				    RESPONSE_QUEUE_SIZE,
2084 				    DDI_DMA_SYNC_FORKERNEL);
2085 				EL(ha, "last chance DMA sync, index=%xh\n",
2086 				    index);
2087 			}
2088 		}
2089 	}
2090 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2091 
2092 	return (sp);
2093 }
2094 
2095 
2096 /*
2097  * ql_status_error
2098  *	Processes received ISP status entry error.
2099  *
2100  * Input:
2101  *	ha:		adapter state pointer.
2102  *	sp:		SRB pointer.
2103  *	pkt:		entry pointer.
2104  *	done_q:		done queue pointer.
2105  *	set_flags:	task daemon flags to set.
2106  *	reset_flags:	task daemon flags to reset.
2107  *
2108  * Returns:
2109  *	BIT_0 = CS_RESET status received.
2110  *
2111  * Context:
2112  *	Interrupt or Kernel context, no mailbox commands allowed.
2113  */
2114 /* ARGSUSED */
static int
ql_status_error(ql_adapter_state_t *ha, ql_srb_t *sp, sts_entry_t *pkt23,
    ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
{
	uint32_t		sense_sz = 0;
	uint32_t		cnt;
	ql_tgt_t		*tq;
	fcp_rsp_t		*fcpr;
	struct fcp_rsp_info	*rsp;
	int			rval = 0;

	/*
	 * Normalized view of the status IOCB. Populated from either the
	 * 24xx/25xx/81xx entry layout or the legacy 23xx layout below, so
	 * the common processing code can be chip-agnostic.
	 */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	fcp_residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* 24xx-family status entry layout. */
		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
		    &pkt24->comp_status);
		sts.scsi_status_l = pkt24->scsi_status_l;
		sts.scsi_status_h = pkt24->scsi_status_h;

		/* Setup firmware residuals (only valid on underrun). */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->residual_length) : 0;

		/* Setup FCP residuals (only valid when RESID bits set). */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;

		/*
		 * Firmware and FCP residual disagreement: distrust the
		 * FCP_RESID_UNDER indication rather than the firmware count.
		 */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {

			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
			    sts.residual_length,
			    pkt24->fcp_rsp_residual_count);
			sts.scsi_status_h = (uint8_t)
			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
		}

		/* Setup state flags. */
		sts.state_flags_l = pkt24->state_flags_l;
		sts.state_flags_h = pkt24->state_flags_h;

		/*
		 * Synthesize the 23xx-style state flags the common code
		 * expects; 24xx firmware does not report them directly.
		 */
		if (sp->fcp->fcp_data_len &&
		    (sts.comp_status != CS_DATA_UNDERRUN ||
		    sts.residual_length != sp->fcp->fcp_data_len)) {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (sp->fcp->fcp_cntl.cntl_write_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_OUT);
		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_IN);
		}
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_HEAD_OF_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_ORDERED_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_SIMPLE_Q);
		}

		/* Setup FCP response info. */
		sts.rsp_info = &pkt24->rsp_sense_data[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_rsp_data_length);
			/* Clamp to the structure the caller can hold. */
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
			/* 24xx response data is big-endian; swap in place. */
			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
				ql_chg_endian(sts.rsp_info + cnt, 4);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data (follows rsp info in the same buffer). */
		sts.req_sense_data =
		    &pkt24->rsp_sense_data[sts.rsp_info_length];
		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
			sts.req_sense_length =
			    ddi_get32(ha->hba_buf.acc_handle,
			    (uint32_t *)&pkt24->fcp_sense_length);
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_ARQ_DONE);
			/* Bytes of sense that fit in this IOCB entry. */
			sense_sz = (uint32_t)
			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
			    (uintptr_t)sts.req_sense_data);
			for (cnt = 0; cnt < sense_sz; cnt += 4) {
				ql_chg_endian(sts.req_sense_data + cnt, 4);
			}
		} else {
			sts.req_sense_length = 0;
		}
	} else {
		/* Legacy (23xx) status entry layout. */
		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    ha->hba_buf.acc_handle, &pkt23->comp_status);
		sts.scsi_status_l = pkt23->scsi_status_l;
		sts.scsi_status_h = pkt23->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(ha->hba_buf.acc_handle,
		    (uint32_t *)&pkt23->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    sts.residual_length : 0;

		/* Setup state flags. */
		sts.state_flags_l = pkt23->state_flags_l;
		sts.state_flags_h = pkt23->state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info = &pkt23->rsp_info[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get16(
			    ha->hba_buf.acc_handle,
			    (uint16_t *)&pkt23->rsp_info_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data = &pkt23->req_sense_data[0];
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    ddi_get16(ha->hba_buf.acc_handle,
		    (uint16_t *)&pkt23->req_sense_length) : 0;
	}

	/* Build the FCP response in the caller's response buffer. */
	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);

	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
	    sizeof (fcp_rsp_t));

	tq = sp->lun_queue->target_queue;

	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
		fcpr->fcp_u.fcp_status.sense_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_OVER) {
		fcpr->fcp_u.fcp_status.resid_over = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_UNDER) {
		fcpr->fcp_u.fcp_status.resid_under = 1;
	}
	fcpr->fcp_u.fcp_status.reserved_1 = 0;

	/* Set ISP completion status */
	sp->pkt->pkt_reason = sts.comp_status;

	/* Update statistics. */
	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {

		/* Space left in the response buffer after the fcp_rsp_t. */
		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
		if (sense_sz > sts.rsp_info_length) {
			sense_sz = sts.rsp_info_length;
		}

		/* copy response information data. */
		if (sense_sz) {
			ddi_rep_get8(ha->hba_buf.acc_handle, (uint8_t *)rsp,
			    sts.rsp_info, sense_sz, DDI_DEV_AUTOINCR);
		}
		fcpr->fcp_response_len = sense_sz;

		/* Advance past the rsp info so sense data follows it. */
		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
		    fcpr->fcp_response_len);

		/* Byte 3 of the rsp info is the FCP rsp_code. */
		switch (*(sts.rsp_info + 3)) {
		case FCP_NO_FAILURE:
			break;
		case FCP_DL_LEN_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].dl_len_mismatches++;
			break;
		case FCP_CMND_INVALID:
			break;
		case FCP_DATA_RO_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].data_ro_mismatches++;
			break;
		case FCP_TASK_MGMT_NOT_SUPPTD:
			break;
		case FCP_TASK_MGMT_FAILED:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].task_mgmt_failures++;
			break;
		default:
			break;
		}
	} else {
		/*
		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
		 */
		fcpr->fcp_response_len = 0;
	}

	/* Set reset status received. */
	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
		rval |= BIT_0;
	}

	/*
	 * Retry decision tree. Tape devices are never retried here to
	 * preserve command ordering on sequential media.
	 */
	if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
	    ha->task_daemon_flags & LOOP_DOWN) {
		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
		    tq->d_id.b24, sp->lun_queue->lun_no);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    tq->port_down_retry_count != 0 &&
	    (sts.comp_status == CS_INCOMPLETE ||
	    sts.comp_status == CS_PORT_UNAVAILABLE ||
	    sts.comp_status == CS_PORT_LOGGED_OUT ||
	    sts.comp_status == CS_PORT_CONFIG_CHG ||
	    sts.comp_status == CS_PORT_BUSY)) {
		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
		    tq->port_down_retry_count);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;

		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags |= TQF_QUEUE_SUSPENDED;

			/* Decrement port down count. */
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				tq->port_down_retry_count--;
			}

			DEVICE_QUEUE_UNLOCK(tq);

			/* Send explicit LOGO if the port went away. */
			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
			    == 0 &&
			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
				sp->ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(sp->ha, tq, done_q);
			}

			/* Arm the port retry timer if not already running. */
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (sts.comp_status == CS_RESET ||
	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
		if (sts.comp_status == CS_RESET) {
			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		} else if (sts.comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
			    tq->qfull_retry_count);
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->qfull_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->qfull_retry_delay) ==
					    0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
		} else {
			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		}

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else {
		/* No retry; finish the command with status/sense. */
		fcpr->fcp_resid =
		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
		    sp->fcp->fcp_data_len : sts.fcp_residual_length;

		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {

			if (sts.scsi_status_l == STATUS_CHECK) {
				sp->pkt->pkt_reason = CS_COMPLETE;
			} else {
				EL(ha, "transport error - "
				    "underrun & invalid resid\n");
				EL(ha, "ssh=%xh, ssl=%xh\n",
				    sts.scsi_status_h, sts.scsi_status_l);
				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
			}
		}

		/* Ignore firmware underrun error. */
		if (sts.comp_status == CS_DATA_UNDERRUN &&
		    (sts.scsi_status_h & FCP_RESID_UNDER ||
		    (sts.scsi_status_l != STATUS_CHECK &&
		    sts.scsi_status_l != STATUS_GOOD))) {
			sp->pkt->pkt_reason = CS_COMPLETE;
		}

		if (sp->pkt->pkt_reason != CS_COMPLETE) {
			ha->xioctl->DeviceErrorCount++;
			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh"
			    "\n", sts.comp_status, tq->d_id.b24,
			    sp->lun_queue->lun_no);
		}

		/* Set target request sense data. */
		if (sts.scsi_status_l == STATUS_CHECK) {
			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {

				/*
				 * Sense keys NO SENSE and UNIT ATTENTION are
				 * expected events; anything else counts as a
				 * device error.
				 */
				if (sp->pkt->pkt_reason == CS_COMPLETE &&
				    sts.req_sense_data[2] != KEY_NO_SENSE &&
				    sts.req_sense_data[2] !=
				    KEY_UNIT_ATTENTION) {
					ha->xioctl->DeviceErrorCount++;
				}

				sense_sz = sts.req_sense_length;

				/* Insure data does not exceed buf. */
				if (sp->pkt->pkt_rsplen <=
				    (uint32_t)sizeof (fcp_rsp_t) +
				    fcpr->fcp_response_len) {
					sp->request_sense_length = 0;
				} else {
					sp->request_sense_length = (uint32_t)
					    (sp->pkt->pkt_rsplen -
					    sizeof (fcp_rsp_t) -
					    fcpr->fcp_response_len);
				}

				if (sense_sz <
				    sp->request_sense_length) {
					sp->request_sense_length =
					    sense_sz;
				}

				sp->request_sense_ptr = (caddr_t)rsp;

				/* Sense bytes available in this IOCB. */
				sense_sz = (uint32_t)
				    (((uintptr_t)pkt23 +
				    sizeof (sts_entry_t)) -
				    (uintptr_t)sts.req_sense_data);
				if (sp->request_sense_length <
				    sense_sz) {
					sense_sz =
					    sp->request_sense_length;
				}

				fcpr->fcp_sense_len = sense_sz;

				/* Move sense data. */
				ddi_rep_get8(ha->hba_buf.acc_handle,
				    (uint8_t *)sp->request_sense_ptr,
				    sts.req_sense_data,
				    (size_t)sense_sz,
				    DDI_DEV_AUTOINCR);

				sp->request_sense_ptr += sense_sz;
				sp->request_sense_length -= sense_sz;
				/*
				 * Remaining sense arrives via status
				 * continuation entries; park the SRB.
				 */
				if (sp->request_sense_length != 0 &&
				    !(CFG_IST(ha, CFG_CTRL_8021))) {
					ha->status_srb = sp;
				}
			}

			if (sense_sz != 0) {
				EL(sp->ha, "check condition sense data, "
				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
				    sp->lun_queue->lun_no,
				    sts.req_sense_data[0],
				    sts.req_sense_data[1],
				    sts.req_sense_data[2],
				    sts.req_sense_data[3],
				    sts.req_sense_data[4],
				    sts.req_sense_data[5],
				    sts.req_sense_data[6],
				    sts.req_sense_data[7],
				    sts.req_sense_data[8],
				    sts.req_sense_data[9],
				    sts.req_sense_data[10],
				    sts.req_sense_data[11],
				    sts.req_sense_data[12],
				    sts.req_sense_data[13],
				    sts.req_sense_data[14],
				    sts.req_sense_data[15],
				    sts.req_sense_data[16],
				    sts.req_sense_data[17]);
			} else {
				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
			}
		}
	}

	/* Set completed status. */
	sp->flags |= SRB_ISP_COMPLETED;

	/*
	 * Place command on done queue, unless more sense data is pending
	 * in continuation entries (ha->status_srb set above).
	 */
	if (ha->status_srb == NULL) {
		ql_add_link_b(done_q, &sp->cmd);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
2592 
2593 /*
2594  * ql_status_cont_entry
2595  *	Processes status continuation entry.
2596  *
2597  * Input:
2598  *	ha:		adapter state pointer.
2599  *	pkt:		entry pointer.
2600  *	done_q:		done queue pointer.
2601  *	set_flags:	task daemon flags to set.
2602  *	reset_flags:	task daemon flags to reset.
2603  *
2604  * Context:
2605  *	Interrupt or Kernel context, no mailbox commands allowed.
2606  */
2607 /* ARGSUSED */
2608 static void
ql_status_cont_entry(ql_adapter_state_t * ha,sts_cont_entry_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)2609 ql_status_cont_entry(ql_adapter_state_t *ha, sts_cont_entry_t *pkt,
2610     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2611 {
2612 	uint32_t	sense_sz, index;
2613 	ql_srb_t	*sp = ha->status_srb;
2614 
2615 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2616 
2617 	if (sp != NULL && sp->request_sense_length) {
2618 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2619 			sense_sz = sizeof (pkt->req_sense_data);
2620 		} else {
2621 			sense_sz = sp->request_sense_length;
2622 		}
2623 
2624 		if (CFG_IST(ha, CFG_CTRL_24258081)) {
2625 			for (index = 0; index < sense_sz; index += 4) {
2626 				ql_chg_endian((uint8_t *)
2627 				    &pkt->req_sense_data[0] + index, 4);
2628 			}
2629 		}
2630 
2631 		/* Move sense data. */
2632 		ddi_rep_get8(ha->hba_buf.acc_handle,
2633 		    (uint8_t *)sp->request_sense_ptr,
2634 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2635 		    DDI_DEV_AUTOINCR);
2636 
2637 		sp->request_sense_ptr += sense_sz;
2638 		sp->request_sense_length -= sense_sz;
2639 
2640 		/* Place command on done queue. */
2641 		if (sp->request_sense_length == 0) {
2642 			ql_add_link_b(done_q, &sp->cmd);
2643 			ha->status_srb = NULL;
2644 		}
2645 	}
2646 
2647 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2648 }
2649 
2650 /*
2651  * ql_ip_entry
2652  *	Processes received ISP IP entry.
2653  *
2654  * Input:
2655  *	ha:		adapter state pointer.
2656  *	pkt:		entry pointer.
2657  *	done_q:		done queue pointer.
2658  *	set_flags:	task daemon flags to set.
2659  *	reset_flags:	task daemon flags to reset.
2660  *
2661  * Context:
2662  *	Interrupt or Kernel context, no mailbox commands allowed.
2663  */
2664 /* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ip_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t	*sp;
	uint32_t	index, resp_identifier;
	ql_tgt_t	*tq;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < MAX_OUTSTANDING_COMMANDS) {
		/* the index seems reasonable */
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		} else {
			/* Slot empty: wait for preprocessing to finish. */
			sp = ql_verify_preprocessed_cmd(ha,
			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx-family IOCB carries status in hdl_status. */
			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		/* Retry decision tree, mirroring FCP command handling. */
		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Explicit LOGO when the port dropped us. */
			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				/* Arm port retry timer if idle. */
				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
2786 
2787 /*
2788  * ql_ip_rcv_entry
2789  *	Processes received ISP IP buffers entry.
2790  *
2791  * Input:
2792  *	ha:		adapter state pointer.
2793  *	pkt:		entry pointer.
2794  *	done_q:		done queue pointer.
2795  *	set_flags:	task daemon flags to set.
2796  *	reset_flags:	task daemon flags to reset.
2797  *
2798  * Context:
2799  *	Interrupt or Kernel context, no mailbox commands allowed.
2800  */
2801 /* ARGSUSED */
2802 static void
ql_ip_rcv_entry(ql_adapter_state_t * ha,ip_rcv_entry_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)2803 ql_ip_rcv_entry(ql_adapter_state_t *ha, ip_rcv_entry_t *pkt,
2804     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2805 {
2806 	port_id_t	s_id;
2807 	uint16_t	index;
2808 	uint8_t		cnt;
2809 	ql_tgt_t	*tq;
2810 
2811 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2812 
2813 	/* Locate device queue. */
2814 	s_id.b.al_pa = pkt->s_id[0];
2815 	s_id.b.area = pkt->s_id[1];
2816 	s_id.b.domain = pkt->s_id[2];
2817 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2818 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2819 		return;
2820 	}
2821 
2822 	tq->ub_sequence_length = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2823 	    &pkt->seq_length);
2824 	tq->ub_total_seg_cnt = pkt->segment_count;
2825 	tq->ub_seq_id = ++ha->ub_seq_id;
2826 	tq->ub_seq_cnt = 0;
2827 	tq->ub_frame_ro = 0;
2828 	tq->ub_loop_id = pkt->loop_id;
2829 	ha->rcv_dev_q = tq;
2830 
2831 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2832 	    tq->ub_total_seg_cnt; cnt++) {
2833 
2834 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2835 		    &pkt->buffer_handle[cnt]);
2836 
2837 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2838 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2839 			*set_flags |= ISP_ABORT_NEEDED;
2840 			break;
2841 		}
2842 	}
2843 
2844 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2845 }
2846 
2847 /*
2848  * ql_ip_rcv_cont_entry
2849  *	Processes received ISP IP buffers continuation entry.
2850  *
2851  * Input:
2852  *	ha:		adapter state pointer.
2853  *	pkt:		entry pointer.
2854  *	done_q:		done queue pointer.
2855  *	set_flags:	task daemon flags to set.
2856  *	reset_flags:	task daemon flags to reset.
2857  *
2858  * Context:
2859  *	Interrupt or Kernel context, no mailbox commands allowed.
2860  */
2861 /* ARGSUSED */
2862 static void
ql_ip_rcv_cont_entry(ql_adapter_state_t * ha,ip_rcv_cont_entry_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)2863 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ip_rcv_cont_entry_t *pkt,
2864     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2865 {
2866 	uint16_t	index;
2867 	uint8_t		cnt;
2868 	ql_tgt_t	*tq;
2869 
2870 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2871 
2872 	if ((tq = ha->rcv_dev_q) == NULL) {
2873 		EL(ha, "No IP receive device\n");
2874 		return;
2875 	}
2876 
2877 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
2878 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
2879 
2880 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2881 		    &pkt->buffer_handle[cnt]);
2882 
2883 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2884 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2885 			*set_flags |= ISP_ABORT_NEEDED;
2886 			break;
2887 		}
2888 	}
2889 
2890 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2891 }
2892 
2893 /*
 * ql_ip_24xx_rcv_entry
2895  *	Processes received ISP24xx IP buffers entry.
2896  *
2897  * Input:
2898  *	ha:		adapter state pointer.
2899  *	pkt:		entry pointer.
2900  *	done_q:		done queue pointer.
2901  *	set_flags:	task daemon flags to set.
2902  *	reset_flags:	task daemon flags to reset.
2903  *
2904  * Context:
2905  *	Interrupt or Kernel context, no mailbox commands allowed.
2906  */
2907 /* ARGSUSED */
2908 static void
ql_ip_24xx_rcv_entry(ql_adapter_state_t * ha,ip_rcv_24xx_entry_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)2909 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ip_rcv_24xx_entry_t *pkt,
2910     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
2911 {
2912 	port_id_t	s_id;
2913 	uint16_t	index;
2914 	uint8_t		cnt;
2915 	ql_tgt_t	*tq;
2916 
2917 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2918 
2919 	/* Locate device queue. */
2920 	s_id.b.al_pa = pkt->s_id[0];
2921 	s_id.b.area = pkt->s_id[1];
2922 	s_id.b.domain = pkt->s_id[2];
2923 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2924 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2925 		return;
2926 	}
2927 
2928 	if (tq->ub_total_seg_cnt == 0) {
2929 		tq->ub_sequence_length = (uint16_t)ddi_get16(
2930 		    ha->hba_buf.acc_handle, &pkt->seq_length);
2931 		tq->ub_total_seg_cnt = pkt->segment_count;
2932 		tq->ub_seq_id = ++ha->ub_seq_id;
2933 		tq->ub_seq_cnt = 0;
2934 		tq->ub_frame_ro = 0;
2935 		tq->ub_loop_id = (uint16_t)ddi_get16(
2936 		    ha->hba_buf.acc_handle, &pkt->n_port_hdl);
2937 	}
2938 
2939 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
2940 	    tq->ub_total_seg_cnt; cnt++) {
2941 
2942 		index = (uint16_t)ddi_get16(ha->hba_buf.acc_handle,
2943 		    &pkt->buffer_handle[cnt]);
2944 
2945 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2946 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2947 			*set_flags |= ISP_ABORT_NEEDED;
2948 			break;
2949 		}
2950 	}
2951 
2952 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2953 }
2954 
/*
 * ql_ms_entry
 *	Processes received Name/Management/CT Pass-Through entry.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	pkt23:		entry pointer (2300-series layout; reinterpreted as
 *			ct_passthru_entry_t on 24xx-and-later controllers).
 *	done_q:		done queue pointer.
 *	set_flags:	task daemon flags to set.
 *	reset_flags:	task daemon flags to reset.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
static void
ql_ms_entry(ql_adapter_state_t *ha, ms_entry_t *pkt23, ql_head_t *done_q,
    uint32_t *set_flags, uint32_t *reset_flags)
{
	ql_srb_t		*sp;
	uint32_t		index, cnt, resp_identifier;
	ql_tgt_t		*tq;
	/* Same IOCB, 24xx+ field layout; selected via CFG_IST below. */
	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(ha->hba_buf.acc_handle, &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < MAX_OUTSTANDING_COMMANDS) {
		/* the index seems reasonable */
		sp = ha->outstanding_cmds[index];
		if (sp != NULL) {
			if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				/* Claim the SRB: remove it from the array. */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/*
				 * Firmware and driver disagree on the handle;
				 * drop the SRB and request an ISP abort.
				 */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		} else {
			/* Slot empty; the command may still be in-flight. */
			sp = ql_verify_preprocessed_cmd(ha,
			    (uint32_t *)&pkt23->handle, set_flags, reset_flags);
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		/* This completion must belong to a managed-services packet. */
		if (!(sp->flags & SRB_MS_PKT)) {
			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
			    sp->flags);
			*set_flags |= ISP_ABORT_NEEDED;
			return;
		}

		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt24->status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    ha->hba_buf.acc_handle, &pkt23->comp_status);
		}

		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
		    sp->retry_count) {
			EL(ha, "Resouce Unavailable Retry = %d\n",
			    sp->retry_count);

			/* Set retry status. */
			sp->retry_count--;
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			/*
			 * Suspend the queue and arm the 2-tick port retry
			 * timer so the retry is driven by the timer, not
			 * resubmitted immediately.
			 */
			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					ha->port_retry_timer = 2;
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				/*
				 * A zero port_down_retry_delay means retry
				 * immediately via the task daemon flag.
				 */
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry\n");

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (CFG_IST(ha, CFG_CTRL_24258081) &&
		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
			/*
			 * On 24xx+, an underrun that still delivered at
			 * least a full CT header is treated as success.
			 */
			cnt = ddi_get32(ha->hba_buf.acc_handle,
			    &pkt24->resp_byte_count);
			if (cnt < sizeof (fc_ct_header_t)) {
				EL(ha, "Data underrun\n");
			} else {
				sp->pkt->pkt_reason = CS_COMPLETE;
			}

		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
		}

		if (sp->pkt->pkt_reason == CS_COMPLETE) {
			/*EMPTY*/
			QL_PRINT_3(CE_CONT, "(%d): ct_cmdrsp=%x%02xh resp\n",
			    ha->instance, sp->pkt->pkt_cmd[8],
			    sp->pkt->pkt_cmd[9]);
			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
		}

		/* For nameserver restore command, management change header. */
		/*
		 * Ternary used as a statement: byte-swap the CT header in
		 * the command buffer for name-server traffic (d_id 0xfffffc),
		 * otherwise in the response buffer.
		 */
		if ((sp->flags & SRB_RETRY) == 0) {
			tq->d_id.b24 == 0xfffffc ?
			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
			    sp->pkt->pkt_cmd, B_TRUE) :
			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
			    sp->pkt->pkt_resp, B_TRUE);
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
3128 
3129 /*
3130  * ql_report_id_entry
3131  *	Processes received Name/Management/CT Pass-Through entry.
3132  *
3133  * Input:
3134  *	ha:		adapter state pointer.
3135  *	pkt:		entry pointer.
3136  *	done_q:		done queue pointer.
3137  *	set_flags:	task daemon flags to set.
3138  *	reset_flags:	task daemon flags to reset.
3139  *
3140  * Context:
3141  *	Interrupt or Kernel context, no mailbox commands allowed.
3142  */
3143 /* ARGSUSED */
3144 static void
ql_report_id_entry(ql_adapter_state_t * ha,report_id_1_t * pkt,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)3145 ql_report_id_entry(ql_adapter_state_t *ha, report_id_1_t *pkt,
3146     ql_head_t *done_q, uint32_t *set_flags, uint32_t *reset_flags)
3147 {
3148 	ql_adapter_state_t	*vha;
3149 
3150 	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3151 
3152 	EL(ha, "format=%d, vp=%d, status=%d\n",
3153 	    pkt->format, pkt->vp_index, pkt->status);
3154 
3155 	if (pkt->format == 1) {
3156 		/* Locate port state structure. */
3157 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3158 			if (vha->vp_index == pkt->vp_index) {
3159 				break;
3160 			}
3161 		}
3162 		if (vha != NULL && vha->vp_index != 0 &&
3163 		    (pkt->status == CS_COMPLETE ||
3164 		    pkt->status == CS_PORT_ID_CHANGE)) {
3165 			*set_flags |= LOOP_RESYNC_NEEDED;
3166 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
3167 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
3168 			TASK_DAEMON_LOCK(ha);
3169 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
3170 			vha->task_daemon_flags &= ~LOOP_DOWN;
3171 			TASK_DAEMON_UNLOCK(ha);
3172 		}
3173 	}
3174 
3175 	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3176 }
3177 
3178 /*
 * ql_els_passthru_entry
3180  *	Processes received ELS Pass-Through entry.
3181  *
3182  * Input:
3183  *	ha:		adapter state pointer.
 *	rsp:		entry pointer.
3185  *	done_q:		done queue pointer.
3186  *	set_flags:	task daemon flags to set.
3187  *	reset_flags:	task daemon flags to reset.
3188  *
3189  * Context:
3190  *	Interrupt or Kernel context, no mailbox commands allowed.
3191  */
3192 /* ARGSUSED */
3193 static void
ql_els_passthru_entry(ql_adapter_state_t * ha,els_passthru_entry_rsp_t * rsp,ql_head_t * done_q,uint32_t * set_flags,uint32_t * reset_flags)3194