1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* Copyright 2015 QLogic Corporation */
23 
24 /*
25  * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26  */
27 
28 /*
29  * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
30  *
31  * ***********************************************************************
32  * *									**
33  * *				NOTICE					**
34  * *		COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION		**
35  * *			ALL RIGHTS RESERVED				**
36  * *									**
37  * ***********************************************************************
38  *
39  */
40 
41 #include <ql_apps.h>
42 #include <ql_api.h>
43 #include <ql_debug.h>
44 #include <ql_iocb.h>
45 #include <ql_isr.h>
46 #include <ql_init.h>
47 #include <ql_mbx.h>
48 #include <ql_nx.h>
49 #include <ql_xioctl.h>
50 #include <ql_fm.h>
51 
52 /*
53  * Local Function Prototypes.
54  */
55 static void ql_clr_risc_intr(ql_adapter_state_t *);
56 static void ql_handle_uncommon_risc_intr(ql_adapter_state_t *, int, uint32_t,
57     uint64_t *);
58 static void ql_mbx_completion(ql_adapter_state_t *, uint16_t, uint64_t *,
59     uint64_t *);
60 static void ql_async_event(ql_adapter_state_t *, ql_response_q_t *, uint32_t,
61     ql_head_t *, uint64_t *, uint64_t *);
62 static void ql_fast_fcp_post(ql_srb_t *, ql_response_q_t *);
63 static void ql_response_pkt(ql_adapter_state_t *, ql_response_q_t *,
64     ql_head_t *, uint64_t *, uint64_t *);
65 static void ql_error_entry(ql_adapter_state_t *, ql_response_q_t *,
66     response_t *, ql_head_t *, uint64_t *, uint64_t *);
67 static int ql_status_entry(ql_adapter_state_t *, ql_response_q_t *,
68     sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
69 static int ql_24xx_status_entry(ql_adapter_state_t *, ql_response_q_t *,
70     sts_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
71 static int ql_status_error(ql_adapter_state_t *, ql_response_q_t *, ql_srb_t *,
72     sts_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
73 static void ql_status_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
74     sts_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
75 static void ql_ip_entry(ql_adapter_state_t *, ql_response_q_t *, ip_entry_t *,
76     ql_head_t *, uint64_t *, uint64_t *);
77 static void ql_ip_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
78     ip_rcv_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
79 static void ql_ip_rcv_cont_entry(ql_adapter_state_t *, ql_response_q_t *,
80     ip_rcv_cont_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
81 static void ql_ip_24xx_rcv_entry(ql_adapter_state_t *, ql_response_q_t *,
82     ip_rcv_24xx_entry_t *, ql_head_t *, uint64_t *, uint64_t *);
83 static void ql_ms_entry(ql_adapter_state_t *, ql_response_q_t *, ms_entry_t *,
84     ql_head_t *, uint64_t *, uint64_t *);
85 static void ql_report_id_entry(ql_adapter_state_t *, ql_response_q_t *,
86     report_id_acq_t *, ql_head_t *, uint64_t *, uint64_t *);
87 static void ql_els_passthru_entry(ql_adapter_state_t *, ql_response_q_t *,
88     els_passthru_entry_rsp_t *, ql_head_t *, uint64_t *, uint64_t *);
89 static ql_srb_t *ql_verify_preprocessed_cmd(ql_adapter_state_t *,
90     ql_response_q_t *, uint32_t *, uint32_t *, uint64_t *, uint64_t *);
91 static void ql_signal_abort(ql_adapter_state_t *, uint64_t *);
92 
/*
 * ql_disable_intr
 *	Disable adapter interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_disable_intr(ql_adapter_state_t *ha)
{
	int	i, rval;

	QL_PRINT_10(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx parts use a dedicated interrupt-control routine. */
		ql_8021_disable_intrs(ha);
	} else {
		/* Clear the interrupt-control register (width per FW type). */
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, ictrl, 0);
			(void) RD32_IO_REG(ha, ictrl);	/* PCI posting */
		} else {
			WRT16_IO_REG(ha, ictrl, 0);
			(void) RD16_IO_REG(ha, ictrl);	/* PCI posting */
		}
	}

	/* Additionally mask each vector when the DDI supports masking. */
	if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
		for (i = 0; i < ha->intr_cnt; i++) {
			QL_PRINT_10(ha, "intr_set_mask %d\n", i);
			if ((rval = ddi_intr_set_mask(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "intr_set_mask status=%xh\n", rval);
			}
		}
	}

	/* Record the new state under the adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_10(ha, "done\n");
}
136 
/*
 * ql_enable_intr
 *	Enable adapter interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_enable_intr(ql_adapter_state_t *ha)
{
	int	i, rval;

	QL_PRINT_10(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx parts use a dedicated interrupt-control routine. */
		ql_8021_enable_intrs(ha);
	} else {
		/* Set the interrupt-control enable bits (width per FW type). */
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			WRT32_IO_REG(ha, ictrl, ISP_EN_RISC);
			(void) RD32_IO_REG(ha, ictrl);	/* PCI posting */
		} else {
			WRT16_IO_REG(ha, ictrl, ISP_EN_INT + ISP_EN_RISC);
			(void) RD16_IO_REG(ha, ictrl);	/* PCI posting */
		}
	}

	/* Additionally unmask each vector when the DDI supports masking. */
	if (ha->intr_cap & DDI_INTR_FLAG_MASKABLE) {
		for (i = 0; i < ha->intr_cnt; i++) {
			QL_PRINT_10(ha, "intr_clr_mask %d\n", i);
			if ((rval = ddi_intr_clr_mask(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "intr_clr_mask status=%xh\n", rval);
			}
		}
	}

	/* Record the new state under the adapter state lock. */
	ADAPTER_STATE_LOCK(ha);
	ha->flags |= INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	QL_PRINT_10(ha, "done\n");
}
180 
/*
 * ql_clr_risc_intr
 *	Clear the pending firmware (RISC) interrupt so the chip can
 *	raise the next one.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_clr_risc_intr(ql_adapter_state_t *ha)
{
	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_82XX)) {
		/* 82xx parts have their own clear routine. */
		ql_8021_clr_fw_intr(ha);
	} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		RD32_IO_REG(ha, hccr);	/* PCI posting. */
	} else {
		/* Older parts: release the semaphore, then clear. */
		WRT16_IO_REG(ha, semaphore, 0);
		WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
		RD16_IO_REG(ha, hccr);  /* PCI posting. */
	}

	QL_PRINT_3(ha, "done\n");
}
209 
/*
 * ql_isr
 *	Legacy INTX interrupt entry point; forwards to the common
 *	handler with vector index 0.
 *
 * Input:
 *	arg1:	adapter state pointer.
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr(caddr_t arg1)
{
	return (ql_isr_aif(arg1, 0));
}
229 
/*
 * ql_isr_aif
 *	Common interrupt handler: demultiplexes mailbox completions,
 *	asynchronous events and response-queue updates, then completes
 *	any finished commands outside the interrupt lock.
 *
 * Input:
 *	arg:	adapter state pointer.
 *	arg2:	interrupt vector index (0 for INTX).
 *
 * Returns:
 *	DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
/* ARGSUSED */
uint_t
ql_isr_aif(caddr_t arg, caddr_t arg2)
{
	uint32_t		mbx, stat;
	ql_adapter_state_t	*ha = (void *)arg;
	uint64_t		set_flags = 0, reset_flags = 0;
	ql_head_t		isr_done_q = {NULL, NULL};
	uint_t			rval = DDI_INTR_UNCLAIMED;
	ql_response_q_t		*rsp_q = NULL;
	int			intr, index = (int)((uintptr_t)arg2);

	QL_PRINT_3(ha, "started, index=%d\n", index);

	/* Exit if not attached. */
	if (ha == NULL || ha->intr_pri == NULL) {
		/* NOTE(review): EL() is passed ha even when NULL here. */
		EL(ha, "ha=%p, intr_pri=%p not attached\n", (void *)ha,
		    ha != NULL ? ha->intr_pri : NULL);
		return (DDI_INTR_UNCLAIMED);
	}

	/* Exit if chip not powered up. */
	if (ha->power_level != PM_LEVEL_D0) {
		EL(ha, "power down exit\n");
		return (DDI_INTR_UNCLAIMED);
	}

	/* Hold off power management while the interrupt is serviced. */
	QL_PM_LOCK(ha);
	ha->pm_busy++;
	QL_PM_UNLOCK(ha);

	/*
	 * Acquire interrupt lock.  Vector index 0 and out-of-range
	 * indexes map to lock 0; otherwise lock (index - 1).
	 */
	if (index > ha->rsp_queues_cnt) {
		intr = index = 0;
	} else if (index) {
		intr = index - 1;
	} else {
		intr = 0;
	}
	INDX_INTR_LOCK(ha, intr);

	if (index && ha->flags & NO_INTR_HANDSHAKE) {
		/*
		 * Multi-queue, no-handshake mode: the vector index
		 * directly identifies the response queue to service.
		 */
		QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE, index=%xh\n", index);
		index--;
		if (index < ha->rsp_queues_cnt) {
			rsp_q = ha->rsp_queues[index];
		}
		if (rsp_q == NULL) {
			EL(ha, "unsupported MULTI_Q_RSP_UPDATE, index=%d\n",
			    index);
			rsp_q = ha->rsp_queues[0];
		}

		/* Fetch the firmware's response-in index. */
		if (ha->flags & QUEUE_SHADOW_PTRS) {
			/* Shadow pointer in host memory; sync before read. */
			(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
			    (off_t)rsp_q->rsp_in_shadow_ofst,
			    SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
			mbx = ddi_get32(rsp_q->rsp_ring.acc_handle,
			    rsp_q->rsp_in_shadow_ptr);
		} else {
			mbx = RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
		}

		if (mbx != rsp_q->rsp_ring_index) {
			/* New entries available; process them. */
			rsp_q->isp_rsp_index = (uint16_t)mbx;
			ql_response_pkt(ha, rsp_q, &isr_done_q,
			    &set_flags, &reset_flags);
			/* PCI posting */
			(void) RD32_MBAR_REG(ha, rsp_q->mbar_rsp_in);
		} else if (ha->flags & INTERRUPTS_ENABLED) {
			/*EMPTY*/
			QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbar_rsp_in "
			    "same as before\n");
		}

		/* Set interrupt claimed status. */
		rval = DDI_INTR_CLAIMED;

	} else if (CFG_IST(ha, CFG_CTRL_22XX)) {
		/* 22xx legacy path: single response queue. */
		rsp_q = ha->rsp_queues[0];
		if (RD16_IO_REG(ha, istatus) & RISC_INT) {
			rval = DDI_INTR_CLAIMED;

			/* Check for mailbox interrupt. */
			stat = RD16_IO_REG(ha, semaphore);
			if (stat & BIT_0) {
				/* Get mailbox data. */
				mbx = RD16_IO_REG(ha, mailbox_out[0]);
				/* 4000h-7fffh: mailbox completion codes. */
				if (mbx > 0x3fff && mbx < 0x8000) {
					ql_mbx_completion(ha, mbx,
					    &set_flags, &reset_flags);
				/* 8000h-bfffh: asynchronous events. */
				} else if (mbx > 0x7fff && mbx < 0xc000) {
					ql_async_event(ha, rsp_q, mbx,
					    &isr_done_q, &set_flags,
					    &reset_flags);
				} else {
					EL(ha, "22XX unknown interrupt type\n");
				}
			} else {
				/* Response-queue update. */
				rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
				if (rsp_q->isp_rsp_index !=
				    rsp_q->rsp_ring_index) {
					ql_response_pkt(ha, rsp_q,
					    &isr_done_q, &set_flags,
					    &reset_flags);
				} else {
					/*EMPTY*/
					QL_PRINT_10(ha, "22XX isp_rsp_index "
					    "same as before\n");
				}
			}
			/* Clear RISC interrupt */
			ql_clr_risc_intr(ha);
		}
	} else {
		/* All other chips: decode the risc2host status register. */
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_clr_hw_intr(ha);
		}

		if (((stat = RD32_IO_REG(ha, risc2host)) & RH_RISC_INT) == 0) {
			QL_PRINT_10(ha, "done, index=%d, no interrupt "
			    "stat=%xh\n", index, stat);
			rval = DDI_INTR_UNCLAIMED;
		} else if (ha->ql_dump_state & QL_DUMPING) {
			/* Firmware dump in progress; just claim. */
			EL(ha, "fw_dump, index=%d, active stat=%xh\n",
			    index, stat);
			rval = DDI_INTR_CLAIMED;
		} else if (CFG_IST(ha, CFG_CTRL_82XX) &&
		    RD32_IO_REG(ha, nx_risc_int) == 0) {
			QL_PRINT_10(ha, "done, index=%d, no nx_risc_int "
			    "stat=%xh\n", index, stat);
			rval = DDI_INTR_UNCLAIMED;
		} else {
			rval = DDI_INTR_CLAIMED;
			QL_PRINT_3(ha, "index=%d, interrupt stat=%xh\n",
			    index, stat);

			/* Capture FW defined interrupt info */
			mbx = MSW(stat);

			if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
			    != DDI_FM_OK) {
				qlc_fm_report_err_impact(ha,
				    QL_FM_EREPORT_ACC_HANDLE_CHECK);
			}

			/* Low 9 bits of stat select the interrupt type. */
			switch (stat & 0x1ff) {
			case ROM_MBX_SUCCESS:
			case ROM_MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags);
				break;

			case MBX_SUCCESS:
			case MBX_ERR:
				ql_mbx_completion(ha, mbx, &set_flags,
				    &reset_flags);
				break;

			case ASYNC_EVENT:
				ql_async_event(ha, ha->rsp_queues[0],
				    (uint32_t)mbx, &isr_done_q,
				    &set_flags, &reset_flags);
				break;

			case MULTI_Q_RSP_UPDATE:
				/*
				 * mbx holds the queue number; look up the
				 * queue and its response-in index, then fall
				 * through to common RESP_UPDATE handling.
				 */
				QL_PRINT_3(ha, "MULTI_Q_RSP_UPDATE mbx=%xh\n",
				    mbx);
				if (mbx < ha->rsp_queues_cnt) {
					rsp_q = ha->rsp_queues[mbx];
				}
				if (rsp_q == NULL) {
					EL(ha, "unsupported MULTI_Q_RSP_UPDATE"
					    " mbx=%d\n", mbx);
					rsp_q = ha->rsp_queues[0];
				}
				if (ha->flags & QUEUE_SHADOW_PTRS) {
					(void) ddi_dma_sync(
					    rsp_q->rsp_ring.dma_handle,
					    (off_t)rsp_q->rsp_in_shadow_ofst,
					    SHADOW_ENTRY_SIZE,
					    DDI_DMA_SYNC_FORCPU);
					mbx = ddi_get32(
					    rsp_q->rsp_ring.acc_handle,
					    rsp_q->rsp_in_shadow_ptr);
				} else {
					mbx = RD32_MBAR_REG(ha,
					    rsp_q->mbar_rsp_in);
				}
				/* FALLTHRU */

			case RESP_UPDATE:
				/* Clear RISC interrupt */
				ql_clr_risc_intr(ha);

				if (rsp_q == NULL) {
					rsp_q = ha->rsp_queues[0];
				}
				if (mbx != rsp_q->rsp_ring_index) {
					rsp_q->isp_rsp_index = (uint16_t)mbx;
					ql_response_pkt(ha, rsp_q, &isr_done_q,
					    &set_flags, &reset_flags);
				} else {
					/*EMPTY*/
					QL_PRINT_3(ha, "response "
					    "ring index same as before\n");
				}
				break;

			/*
			 * Fast-post cases: rewrite stat's low word into the
			 * equivalent async-event code and dispatch.
			 */
			case SCSI_FAST_POST_16:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_16BIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case SCSI_FAST_POST_32:
				stat = (stat & 0xffff0000) | MBA_CMPLT_1_32BIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case CTIO_FAST_POST:
				stat = (stat & 0xffff0000) |
				    MBA_CTIO_COMPLETION;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_XMT:
				stat = (stat & 0xffff0000) | MBA_IP_COMPLETION;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_RCV:
				stat = (stat & 0xffff0000) | MBA_IP_RECEIVE;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_BRD:
				stat = (stat & 0xffff0000) | MBA_IP_BROADCAST;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case IP_FAST_POST_RCV_ALN:
				stat = (stat & 0xffff0000) |
				    MBA_IP_HDR_DATA_SPLIT;
				ql_async_event(ha, ha->rsp_queues[0],
				    stat, &isr_done_q, &set_flags,
				    &reset_flags);
				break;

			case ATIO_UPDATE:
				EL(ha, "unsupported ATIO queue update"
				    " interrupt, status=%xh\n", stat);
				break;

			case ATIO_RESP_UPDATE:
				EL(ha, "unsupported ATIO response queue "
				    "update interrupt, status=%xh\n", stat);
				break;

			default:
				ql_handle_uncommon_risc_intr(ha, intr, stat,
				    &set_flags);
				break;
			}
		}

		/*
		 * Clear RISC interrupt unless the RESP_UPDATE path
		 * (which sets rsp_q) already did so.
		 */
		if (rval == DDI_INTR_CLAIMED && rsp_q == NULL) {
			ql_clr_risc_intr(ha);
		}

		/* A0 chip delay */
		if (CFG_IST(ha, CFG_CTRL_83XX) && ha->rev_id == 1 &&
		    ha->iflags & (IFLG_INTR_LEGACY | IFLG_INTR_FIXED)) {
			drv_usecwait(4);
		}
	}

	/* Process claimed interrupts during polls. */
	if (rval == DDI_INTR_UNCLAIMED && ha->intr_claimed == B_TRUE) {
		ha->intr_claimed = B_FALSE;
		rval = DDI_INTR_CLAIMED;
	}

	/* Release interrupt lock. */
	INDX_INTR_UNLOCK(ha, intr);

	/* Wake the task daemon for any flags collected above. */
	if (set_flags || reset_flags) {
		ql_awaken_task_daemon(ha, NULL, set_flags, reset_flags);
	}

	/* Complete finished commands outside the interrupt lock. */
	if (isr_done_q.first != NULL) {
		ql_done(isr_done_q.first, B_FALSE);
	}

	/* Drop the power-management busy hold taken above. */
	QL_PM_LOCK(ha);
	if (ha->pm_busy) {
		ha->pm_busy--;
	}
	QL_PM_UNLOCK(ha);

	if (rval == DDI_INTR_CLAIMED) {
		QL_PRINT_3(ha, "done\n");
		ha->idle_timer = 0;
		ha->xioctl->TotalInterrupts++;
	} else {
		/*EMPTY*/
		QL_PRINT_10(ha, "interrupt not claimed\n");
	}

	return (rval);
}
566 
/*
 * ql_handle_uncommon_risc_intr
 *	Handle an uncommon RISC interrupt: detect parity/pause errors,
 *	capture a firmware dump, and schedule an ISP abort; otherwise
 *	just log the unknown status.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	intr:		interrupt index.
 *	stat:		interrupt status
 *	set_flags:	task daemon flags to set.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
static void
ql_handle_uncommon_risc_intr(ql_adapter_state_t *ha, int intr, uint32_t stat,
    uint64_t *set_flags)
{
	uint16_t	hccr_reg;

	hccr_reg = RD16_IO_REG(ha, hccr);

	if (stat & RH_RISC_PAUSED ||
	    (hccr_reg & (BIT_15 | BIT_13 | BIT_11 | BIT_8))) {

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= PARITY_ERROR;
		ADAPTER_STATE_UNLOCK(ha);

		/* Warn once per distinct hccr/stat combination. */
		if (ha->parity_pause_errors == 0 ||
		    ha->parity_hccr_err != hccr_reg ||
		    ha->parity_stat_err != stat) {
			cmn_err(CE_WARN, "qlc(%d): isr, Internal Parity/"
			    "Pause Error - hccr=%xh, stat=%xh, count=%d",
			    ha->instance, hccr_reg, stat,
			    ha->parity_pause_errors);
			ha->parity_hccr_err = hccr_reg;
			ha->parity_stat_err = stat;
		}

		EL(ha, "parity/pause error, isp_abort_needed\n");

		/*
		 * Drop the interrupt lock across the firmware dump;
		 * fall back to a chip reset if the dump fails.
		 */
		INDX_INTR_UNLOCK(ha, intr);
		if (ql_binary_fw_dump(ha, FALSE) != QL_SUCCESS) {
			ql_reset_chip(ha);
		}
		INDX_INTR_LOCK(ha, intr);

		if (ha->parity_pause_errors == 0) {
			ha->log_parity_pause = B_TRUE;
		}

		/* Saturating error counter. */
		if (ha->parity_pause_errors < 0xffffffff) {
			ha->parity_pause_errors++;
		}

		*set_flags |= ISP_ABORT_NEEDED;

		/* Disable ISP interrupts. */
		ql_disable_intr(ha);
	} else {
		EL(ha, "UNKNOWN interrupt status=%xh, hccr=%xh\n",
		    stat, hccr_reg);
	}
}
631 
/*
 * ql_mbx_completion
 *	Processes mailbox completions: copies the requested outgoing
 *	mailbox registers into the active mailbox command context and
 *	wakes any thread waiting on the mailbox interrupt.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	mb0:		Mailbox 0 contents.
 *	set_flags:	task daemon flags to set (unused).
 *	reset_flags:	task daemon flags to reset (unused).
 *
 * Context:
 *	Interrupt context.
 */
/* ARGSUSED */
static void
ql_mbx_completion(ql_adapter_state_t *ha, uint16_t mb0, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	uint32_t	index;
	uint16_t	cnt;

	QL_PRINT_3(ha, "started\n");

	/* Load return mailbox registers. */
	MBX_REGISTER_LOCK(ha);

	if (ha->mcp != NULL) {
		ha->mcp->mb[0] = mb0;
		/* in_mb is a bitmask of mailboxes the caller wants back. */
		index = ha->mcp->in_mb & ~MBX_0;

		for (cnt = 1; cnt < MAX_MBOX_COUNT && index != 0; cnt++) {
			index >>= 1;
			if (index & MBX_0) {
				ha->mcp->mb[cnt] = RD16_IO_REG(ha,
				    mailbox_out[cnt]);
			}
		}

	} else {
		/* Completion arrived with no mailbox command outstanding. */
		EL(ha, "mcp == NULL\n");
	}

	/* Signal the waiter; only broadcast when interrupts are live. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_INTERRUPT);
	if (ha->flags & INTERRUPTS_ENABLED) {
		cv_broadcast(&ha->cv_mbx_intr);
	}

	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
683 
684 /*
685  * ql_async_event
686  *	Processes asynchronous events.
687  *
688  * Input:
689  *	ha:		adapter state pointer.
690  *	rsp_q:		response queue structure pointer.
691  *	mbx:		Mailbox 0 register.
692  *	done_q:		head pointer to done queue.
693  *	set_flags:	task daemon flags to set.
694  *	reset_flags:	task daemon flags to reset.
695  *
696  * Context:
697  *	Interrupt or Kernel context, no mailbox commands allowed.
698  */
699 static void
ql_async_event(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,uint32_t mbx,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)700 ql_async_event(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, uint32_t mbx,
701     ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
702 {
703 	uint32_t		index, handles[5];
704 	uint16_t		cnt, handle_cnt, mb[MAX_MBOX_COUNT];
705 	ql_srb_t		*sp;
706 	port_id_t		s_id;
707 	ql_tgt_t		*tq;
708 	ql_adapter_state_t	*vha;
709 
710 	QL_PRINT_3(ha, "started\n");
711 
712 	/* Setup to process fast completion. */
713 	mb[0] = LSW(mbx);
714 	switch (mb[0]) {
715 	case MBA_SCSI_COMPLETION:
716 		handles[0] = SHORT_TO_LONG(RD16_IO_REG(ha, mailbox_out[1]),
717 		    RD16_IO_REG(ha, mailbox_out[2]));
718 		handle_cnt = 1;
719 		break;
720 
721 	case MBA_CMPLT_1_16BIT:
722 		handles[0] = MSW(mbx);
723 		handle_cnt = 1;
724 		mb[0] = MBA_SCSI_COMPLETION;
725 		break;
726 
727 	case MBA_CMPLT_2_16BIT:
728 		handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
729 		handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
730 		handle_cnt = 2;
731 		mb[0] = MBA_SCSI_COMPLETION;
732 		break;
733 
734 	case MBA_CMPLT_3_16BIT:
735 		handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
736 		handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
737 		handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
738 		handle_cnt = 3;
739 		mb[0] = MBA_SCSI_COMPLETION;
740 		break;
741 
742 	case MBA_CMPLT_4_16BIT:
743 		handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
744 		handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
745 		handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
746 		handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
747 		handle_cnt = 4;
748 		mb[0] = MBA_SCSI_COMPLETION;
749 		break;
750 
751 	case MBA_CMPLT_5_16BIT:
752 		handles[0] = (uint32_t)RD16_IO_REG(ha, mailbox_out[1]);
753 		handles[1] = (uint32_t)RD16_IO_REG(ha, mailbox_out[2]);
754 		handles[2] = (uint32_t)RD16_IO_REG(ha, mailbox_out[3]);
755 		handles[3] = (uint32_t)RD16_IO_REG(ha, mailbox_out[6]);
756 		handles[4] = (uint32_t)RD16_IO_REG(ha, mailbox_out[7]);
757 		handle_cnt = 5;
758 		mb[0] = MBA_SCSI_COMPLETION;
759 		break;
760 
761 	case MBA_CMPLT_1_32BIT:
762 		handles[0] = SHORT_TO_LONG(MSW(mbx),
763 		    RD16_IO_REG(ha, mailbox_out[2]));
764 		handle_cnt = 1;
765 		mb[0] = MBA_SCSI_COMPLETION;
766 		break;
767 
768 	case MBA_CMPLT_2_32BIT:
769 		handles[0] = SHORT_TO_LONG(
770 		    RD16_IO_REG(ha, mailbox_out[1]),
771 		    RD16_IO_REG(ha, mailbox_out[2]));
772 		handles[1] = SHORT_TO_LONG(
773 		    RD16_IO_REG(ha, mailbox_out[6]),
774 		    RD16_IO_REG(ha, mailbox_out[7]));
775 		handle_cnt = 2;
776 		mb[0] = MBA_SCSI_COMPLETION;
777 		break;
778 
779 	case MBA_CTIO_COMPLETION:
780 	case MBA_IP_COMPLETION:
781 		handles[0] = CFG_IST(ha, CFG_CTRL_22XX) ? SHORT_TO_LONG(
782 		    RD16_IO_REG(ha, mailbox_out[1]),
783 		    RD16_IO_REG(ha, mailbox_out[2])) :
784 		    SHORT_TO_LONG(MSW(mbx), RD16_IO_REG(ha, mailbox_out[2]));
785 		handle_cnt = 1;
786 		mb[0] = MBA_SCSI_COMPLETION;
787 		break;
788 
789 	default:
790 		break;
791 	}
792 
793 	/* Handle asynchronous event */
794 	switch (mb[0]) {
795 	case MBA_SCSI_COMPLETION:
796 		QL_PRINT_5(ha, "Fast post completion\n");
797 
798 		if ((ha->flags & ONLINE) == 0) {
799 			break;
800 		}
801 
802 		for (cnt = 0; cnt < handle_cnt; cnt++) {
803 			QL_PRINT_5(ha, "Fast post completion, handle=%xh\n",
804 			    handles[cnt]);
805 
806 			/* Get handle. */
807 			index = handles[cnt] & OSC_INDEX_MASK;
808 
809 			/* Validate handle. */
810 			sp = index < ha->osc_max_cnt ?
811 			    ha->outstanding_cmds[index] : NULL;
812 
813 			if (sp == QL_ABORTED_SRB(ha)) {
814 				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
815 				    handles[cnt]);
816 				ha->outstanding_cmds[index] = NULL;
817 				continue;
818 			}
819 			if (sp != NULL && sp->handle == handles[cnt]) {
820 				ha->outstanding_cmds[index] = NULL;
821 				sp->handle = 0;
822 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
823 
824 				/* Set completed status. */
825 				sp->flags |= SRB_ISP_COMPLETED;
826 
827 				/* Set completion status */
828 				sp->pkt->pkt_reason = CS_COMPLETE;
829 
830 				if (!(sp->flags & SRB_FCP_CMD_PKT)) {
831 					/* Place block on done queue */
832 					ql_add_link_b(done_q, &sp->cmd);
833 				} else {
834 					ql_fast_fcp_post(sp, rsp_q);
835 				}
836 			} else if (handles[cnt] != QL_FCA_BRAND) {
837 				if (sp == NULL) {
838 					EL(ha, "%xh unknown IOCB handle=%xh\n",
839 					    mb[0], handles[cnt]);
840 				} else {
841 					EL(ha, "%xh mismatch IOCB handle "
842 					    "pkt=%xh, sp=%xh\n", mb[0],
843 					    handles[cnt], sp->handle);
844 				}
845 
846 				EL(ha, "%xh Fast post, mbx1=%xh, mbx2=%xh, "
847 				    "mbx3=%xh, mbx6=%xh, mbx7=%xh\n", mb[0],
848 				    RD16_IO_REG(ha, mailbox_out[1]),
849 				    RD16_IO_REG(ha, mailbox_out[2]),
850 				    RD16_IO_REG(ha, mailbox_out[3]),
851 				    RD16_IO_REG(ha, mailbox_out[6]),
852 				    RD16_IO_REG(ha, mailbox_out[7]));
853 
854 				ADAPTER_STATE_LOCK(ha);
855 				ha->flags |= FW_DUMP_NEEDED;
856 				ADAPTER_STATE_UNLOCK(ha);
857 
858 				if (!(ha->task_daemon_flags &
859 				    ISP_ABORT_NEEDED)) {
860 					EL(ha, "%xh ISP Invalid handle, "
861 					    "isp_abort_needed\n", mb[0]);
862 					*set_flags |= ISP_ABORT_NEEDED;
863 				}
864 			}
865 		}
866 		break;
867 
868 	case MBA_RESET:		/* Reset */
869 		EL(ha, "%xh Reset received\n", mb[0]);
870 		*set_flags |= MARKER_NEEDED;
871 		break;
872 
873 	case MBA_SYSTEM_ERR:		/* System Error */
874 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
875 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
876 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
877 		mb[7] = RD16_IO_REG(ha, mailbox_out[7]);
878 
879 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx1=%xh, "
880 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh,\n "
881 		    "mbx7=%xh, mbx8=%xh, mbx9=%xh, mbx10=%xh, mbx11=%xh, "
882 		    "mbx12=%xh,\n", mb[0], mb[1], mb[2], mb[3],
883 		    RD16_IO_REG(ha, mailbox_out[4]),
884 		    RD16_IO_REG(ha, mailbox_out[5]),
885 		    RD16_IO_REG(ha, mailbox_out[6]), mb[7],
886 		    RD16_IO_REG(ha, mailbox_out[8]),
887 		    RD16_IO_REG(ha, mailbox_out[9]),
888 		    RD16_IO_REG(ha, mailbox_out[10]),
889 		    RD16_IO_REG(ha, mailbox_out[11]),
890 		    RD16_IO_REG(ha, mailbox_out[12]));
891 
892 		EL(ha, "%xh ISP System Error, isp_abort_needed\n mbx13=%xh, "
893 		    "mbx14=%xh, mbx15=%xh, mbx16=%xh, mbx17=%xh, mbx18=%xh,\n"
894 		    "mbx19=%xh, mbx20=%xh, mbx21=%xh, mbx22=%xh, mbx23=%xh\n",
895 		    mb[0], RD16_IO_REG(ha, mailbox_out[13]),
896 		    RD16_IO_REG(ha, mailbox_out[14]),
897 		    RD16_IO_REG(ha, mailbox_out[15]),
898 		    RD16_IO_REG(ha, mailbox_out[16]),
899 		    RD16_IO_REG(ha, mailbox_out[17]),
900 		    RD16_IO_REG(ha, mailbox_out[18]),
901 		    RD16_IO_REG(ha, mailbox_out[19]),
902 		    RD16_IO_REG(ha, mailbox_out[20]),
903 		    RD16_IO_REG(ha, mailbox_out[21]),
904 		    RD16_IO_REG(ha, mailbox_out[22]),
905 		    RD16_IO_REG(ha, mailbox_out[23]));
906 
907 		if (ha->reg_off->mbox_cnt > 24) {
908 			EL(ha, "%xh ISP System Error, mbx24=%xh, mbx25=%xh, "
909 			    "mbx26=%xh,\n mbx27=%xh, mbx28=%xh, mbx29=%xh, "
910 			    "mbx30=%xh, mbx31=%xh\n", mb[0],
911 			    RD16_IO_REG(ha, mailbox_out[24]),
912 			    RD16_IO_REG(ha, mailbox_out[25]),
913 			    RD16_IO_REG(ha, mailbox_out[26]),
914 			    RD16_IO_REG(ha, mailbox_out[27]),
915 			    RD16_IO_REG(ha, mailbox_out[28]),
916 			    RD16_IO_REG(ha, mailbox_out[29]),
917 			    RD16_IO_REG(ha, mailbox_out[30]),
918 			    RD16_IO_REG(ha, mailbox_out[31]));
919 		}
920 
921 		ADAPTER_STATE_LOCK(ha);
922 		ha->flags |= FW_DUMP_NEEDED;
923 		ADAPTER_STATE_UNLOCK(ha);
924 
925 		/* Signal task daemon to store error log. */
926 		if (ha->errlog[0] == 0) {
927 			ha->errlog[3] = mb[3];
928 			ha->errlog[2] = mb[2];
929 			ha->errlog[1] = mb[1];
930 			ha->errlog[0] = FLASH_ERRLOG_AEN_8002;
931 		}
932 
933 		if (CFG_IST(ha, CFG_CTRL_81XX) && mb[7] & SE_MPI_RISC) {
934 			ADAPTER_STATE_LOCK(ha);
935 			ha->flags |= MPI_RESET_NEEDED;
936 			ADAPTER_STATE_UNLOCK(ha);
937 		}
938 
939 		*set_flags |= ISP_ABORT_NEEDED;
940 		ha->xioctl->ControllerErrorCount++;
941 		break;
942 
943 	case MBA_REQ_TRANSFER_ERR:  /* Request Transfer Error */
944 		EL(ha, "%xh Request Transfer Error received, "
945 		    "isp_abort_needed\n", mb[0]);
946 
947 		/* Signal task daemon to store error log. */
948 		if (ha->errlog[0] == 0) {
949 			ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
950 			ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
951 			ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
952 			ha->errlog[0] = FLASH_ERRLOG_AEN_8003;
953 		}
954 
955 		*set_flags |= ISP_ABORT_NEEDED;
956 		ha->xioctl->ControllerErrorCount++;
957 
958 		(void) qlc_fm_report_err_impact(ha,
959 		    QL_FM_EREPORT_MBA_REQ_TRANSFER_ERR);
960 
961 		break;
962 
963 	case MBA_RSP_TRANSFER_ERR:  /* Response Xfer Err */
964 		EL(ha, "%xh Response Transfer Error received,"
965 		    " isp_abort_needed\n", mb[0]);
966 
967 		/* Signal task daemon to store error log. */
968 		if (ha->errlog[0] == 0) {
969 			ha->errlog[3] = RD16_IO_REG(ha, mailbox_out[3]);
970 			ha->errlog[2] = RD16_IO_REG(ha, mailbox_out[2]);
971 			ha->errlog[1] = RD16_IO_REG(ha, mailbox_out[1]);
972 			ha->errlog[0] = FLASH_ERRLOG_AEN_8004;
973 		}
974 
975 		*set_flags |= ISP_ABORT_NEEDED;
976 		ha->xioctl->ControllerErrorCount++;
977 
978 		(void) qlc_fm_report_err_impact(ha,
979 		    QL_FM_EREPORT_MBA_RSP_TRANSFER_ERR);
980 
981 		break;
982 
983 	case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
984 		EL(ha, "%xh Request Queue Wake-up "
985 		    "received, mbx1=%xh\n", mb[0],
986 		    RD16_IO_REG(ha, mailbox_out[1]));
987 		break;
988 
989 	case MBA_MENLO_ALERT:	/* Menlo Alert Notification */
990 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
991 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
992 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
993 
994 		EL(ha, "%xh Menlo Alert Notification received, mbx1=%xh,"
995 		    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
996 
997 		switch (mb[1]) {
998 		case MLA_LOGIN_OPERATIONAL_FW:
999 			ADAPTER_STATE_LOCK(ha);
1000 			ha->flags |= MENLO_LOGIN_OPERATIONAL;
1001 			ADAPTER_STATE_UNLOCK(ha);
1002 			break;
1003 		case MLA_PANIC_RECOVERY:
1004 		case MLA_LOGIN_DIAGNOSTIC_FW:
1005 		case MLA_LOGIN_GOLDEN_FW:
1006 		case MLA_REJECT_RESPONSE:
1007 		default:
1008 			break;
1009 		}
1010 		break;
1011 
1012 	case MBA_LIP_F8:	/* Received a LIP F8. */
1013 	case MBA_LIP_RESET:	/* LIP reset occurred. */
1014 	case MBA_LIP_OCCURRED:	/* Loop Initialization Procedure */
1015 		if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1016 			EL(ha, "%xh DCBX_STARTED received, mbx1=%xh, mbx2=%xh"
1017 			    "\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1018 			    RD16_IO_REG(ha, mailbox_out[2]));
1019 		} else {
1020 			EL(ha, "%xh LIP received\n", mb[0]);
1021 		}
1022 
1023 		ADAPTER_STATE_LOCK(ha);
1024 		ha->flags &= ~POINT_TO_POINT;
1025 		ADAPTER_STATE_UNLOCK(ha);
1026 
1027 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1028 			*set_flags |= LOOP_DOWN;
1029 		}
1030 		ql_port_state(ha, FC_STATE_OFFLINE,
1031 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1032 
1033 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1034 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1035 		}
1036 
1037 		ha->adapter_stats->lip_count++;
1038 
1039 		/* Update AEN queue. */
1040 		ha->xioctl->TotalLipResets++;
1041 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1042 			ql_enqueue_aen(ha, mb[0], NULL);
1043 		}
1044 		break;
1045 
1046 	case MBA_LOOP_UP:
1047 		if (!CFG_IST(ha, CFG_CTRL_22XX)) {
1048 			ha->iidma_rate = RD16_IO_REG(ha, mailbox_out[1]);
1049 			if (ha->iidma_rate == IIDMA_RATE_1GB) {
1050 				ha->state = FC_PORT_STATE_MASK(
1051 				    ha->state) | FC_STATE_1GBIT_SPEED;
1052 				index = 1;
1053 			} else if (ha->iidma_rate == IIDMA_RATE_2GB) {
1054 				ha->state = FC_PORT_STATE_MASK(
1055 				    ha->state) | FC_STATE_2GBIT_SPEED;
1056 				index = 2;
1057 			} else if (ha->iidma_rate == IIDMA_RATE_4GB) {
1058 				ha->state = FC_PORT_STATE_MASK(
1059 				    ha->state) | FC_STATE_4GBIT_SPEED;
1060 				index = 4;
1061 			} else if (ha->iidma_rate == IIDMA_RATE_8GB) {
1062 				ha->state = FC_PORT_STATE_MASK(
1063 				    ha->state) | FC_STATE_8GBIT_SPEED;
1064 				index = 8;
1065 			} else if (ha->iidma_rate == IIDMA_RATE_10GB) {
1066 				ha->state = FC_PORT_STATE_MASK(
1067 				    ha->state) | FC_STATE_10GBIT_SPEED;
1068 				index = 10;
1069 			} else if (ha->iidma_rate == IIDMA_RATE_16GB) {
1070 				ha->state = FC_PORT_STATE_MASK(
1071 				    ha->state) | FC_STATE_16GBIT_SPEED;
1072 				index = 16;
1073 			} else if (ha->iidma_rate == IIDMA_RATE_32GB) {
1074 				ha->state = FC_PORT_STATE_MASK(
1075 				    ha->state) | FC_STATE_32GBIT_SPEED;
1076 				index = 32;
1077 			} else {
1078 				ha->state = FC_PORT_STATE_MASK(
1079 				    ha->state);
1080 				index = 0;
1081 			}
1082 		} else {
1083 			ha->iidma_rate = IIDMA_RATE_1GB;
1084 			ha->state = FC_PORT_STATE_MASK(ha->state) |
1085 			    FC_STATE_FULL_SPEED;
1086 			index = 1;
1087 		}
1088 
1089 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1090 			vha->state = FC_PORT_STATE_MASK(vha->state) |
1091 			    FC_PORT_SPEED_MASK(ha->state);
1092 		}
1093 		EL(ha, "%d GB %xh Loop Up received\n", index, mb[0]);
1094 
1095 		/* Update AEN queue. */
1096 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1097 			ql_enqueue_aen(ha, mb[0], NULL);
1098 		}
1099 		break;
1100 
1101 	case MBA_LOOP_DOWN:
1102 		EL(ha, "%xh Loop Down received, mbx1=%xh, mbx2=%xh, mbx3=%xh, "
1103 		    "mbx4=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1104 		    RD16_IO_REG(ha, mailbox_out[2]),
1105 		    RD16_IO_REG(ha, mailbox_out[3]),
1106 		    RD16_IO_REG(ha, mailbox_out[4]));
1107 
1108 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1109 			*set_flags |= LOOP_DOWN;
1110 		}
1111 		ql_port_state(ha, FC_STATE_OFFLINE,
1112 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1113 
1114 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1115 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1116 		}
1117 
1118 		if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1119 			ha->sfp_stat = RD16_IO_REG(ha, mailbox_out[2]);
1120 		}
1121 
1122 		/* Update AEN queue. */
1123 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1124 			ql_enqueue_aen(ha, mb[0], NULL);
1125 		}
1126 		break;
1127 
1128 	case MBA_PORT_UPDATE:
1129 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1130 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1131 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1132 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1133 
1134 		/* Locate port state structure. */
1135 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1136 			if (vha->vp_index == LSB(mb[3])) {
1137 				break;
1138 			}
1139 		}
1140 		if (vha == NULL) {
1141 			break;
1142 		}
1143 
1144 		if (mb[1] == 0xffff &&
1145 		    mb[2] == 7 && (MSB(mb[3]) == 0xe || MSB(mb[3]) == 0x1a ||
1146 		    MSB(mb[3]) == 0x1c || MSB(mb[3]) == 0x1d ||
1147 		    MSB(mb[3]) == 0x1e)) {
1148 			EL(ha, "%xh Port Database Update, Loop down "
1149 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1150 			    mb[0], mb[1], mb[2], mb[3]);
1151 			/*
1152 			 * received FLOGI reject
1153 			 * received FLOGO
1154 			 * FCF configuration changed
1155 			 * FIP Clear Virtual Link received
1156 			 * FCF timeout
1157 			 */
1158 			if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1159 				*set_flags |= LOOP_DOWN;
1160 			}
1161 			ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE |
1162 			    COMMAND_WAIT_NEEDED | LOOP_DOWN);
1163 			if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1164 				ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1165 			}
1166 		/*
1167 		 * In N port 2 N port topology the FW provides a port
1168 		 * database entry at loop_id 0x7fe which we use to
1169 		 * acquire the Ports WWPN.
1170 		 */
1171 		} else if ((mb[1] != 0x7fe) &&
1172 		    ((FC_PORT_STATE_MASK(vha->state) != FC_STATE_OFFLINE ||
1173 		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
1174 		    (mb[2] != 6 || mb[3] != 0))))) {
1175 			EL(ha, "%xh Port Database Update, Login/Logout "
1176 			    "received, mbx1=%xh, mbx2=%xh, mbx3=%xh\n",
1177 			    mb[0], mb[1], mb[2], mb[3]);
1178 		} else {
1179 			EL(ha, "%xh Port Database Update received, mbx1=%xh,"
1180 			    " mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2],
1181 			    mb[3]);
1182 			*set_flags |= LOOP_RESYNC_NEEDED;
1183 			*set_flags &= ~LOOP_DOWN;
1184 			*reset_flags |= LOOP_DOWN;
1185 			*reset_flags &= ~LOOP_RESYNC_NEEDED;
1186 			vha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
1187 			TASK_DAEMON_LOCK(ha);
1188 			vha->task_daemon_flags |= LOOP_RESYNC_NEEDED;
1189 			vha->task_daemon_flags &= ~LOOP_DOWN;
1190 			TASK_DAEMON_UNLOCK(ha);
1191 			ADAPTER_STATE_LOCK(ha);
1192 			vha->flags &= ~ABORT_CMDS_LOOP_DOWN_TMO;
1193 			ADAPTER_STATE_UNLOCK(ha);
1194 		}
1195 
1196 		/* Update AEN queue. */
1197 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1198 			ql_enqueue_aen(ha, mb[0], NULL);
1199 		}
1200 		break;
1201 
1202 	case MBA_RSCN_UPDATE:
1203 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1204 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1205 		mb[3] = (uint16_t)(ha->flags & VP_ENABLED ?
1206 		    RD16_IO_REG(ha, mailbox_out[3]) : 0);
1207 
1208 		/* Locate port state structure. */
1209 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
1210 			if (vha->vp_index == LSB(mb[3])) {
1211 				break;
1212 			}
1213 		}
1214 
1215 		if (vha == NULL) {
1216 			break;
1217 		}
1218 
1219 		if (LSB(mb[1]) == vha->d_id.b.domain &&
1220 		    MSB(mb[2]) == vha->d_id.b.area &&
1221 		    LSB(mb[2]) == vha->d_id.b.al_pa) {
1222 			EL(ha, "%xh RSCN match adapter, mbx1=%xh, mbx2=%xh, "
1223 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1224 		} else {
1225 			EL(ha, "%xh RSCN received, mbx1=%xh, mbx2=%xh, "
1226 			    "mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1227 			if (FC_PORT_STATE_MASK(vha->state) !=
1228 			    FC_STATE_OFFLINE) {
1229 				ql_rcv_rscn_els(vha, &mb[0], done_q);
1230 				TASK_DAEMON_LOCK(ha);
1231 				vha->task_daemon_flags |= RSCN_UPDATE_NEEDED;
1232 				TASK_DAEMON_UNLOCK(ha);
1233 				*set_flags |= RSCN_UPDATE_NEEDED;
1234 			}
1235 		}
1236 
1237 		/* Update AEN queue. */
1238 		if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
1239 			ql_enqueue_aen(ha, mb[0], NULL);
1240 		}
1241 		break;
1242 
1243 	case MBA_LIP_ERROR:	/* Loop initialization errors. */
1244 		EL(ha, "%xh LIP error received, mbx1=%xh\n", mb[0],
1245 		    RD16_IO_REG(ha, mailbox_out[1]));
1246 		break;
1247 
1248 	case MBA_IP_RECEIVE:
1249 	case MBA_IP_BROADCAST:
1250 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1251 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1252 		mb[3] = RD16_IO_REG(ha, mailbox_out[3]);
1253 
1254 		EL(ha, "%xh IP packet/broadcast received, mbx1=%xh, "
1255 		    "mbx2=%xh, mbx3=%xh\n", mb[0], mb[1], mb[2], mb[3]);
1256 
1257 		/* Locate device queue. */
1258 		s_id.b.al_pa = LSB(mb[2]);
1259 		s_id.b.area = MSB(mb[2]);
1260 		s_id.b.domain = LSB(mb[1]);
1261 		if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
1262 			EL(ha, "Unknown IP device=%xh\n", s_id.b24);
1263 			break;
1264 		}
1265 
1266 		cnt = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1267 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb24.buf_size[0],
1268 		    ha->ip_init_ctrl_blk.cb24.buf_size[1]) :
1269 		    CHAR_TO_SHORT(ha->ip_init_ctrl_blk.cb.buf_size[0],
1270 		    ha->ip_init_ctrl_blk.cb.buf_size[1]));
1271 
1272 		tq->ub_sequence_length = mb[3];
1273 		tq->ub_total_seg_cnt = (uint8_t)(mb[3] / cnt);
1274 		if (mb[3] % cnt) {
1275 			tq->ub_total_seg_cnt++;
1276 		}
1277 		cnt = (uint16_t)(tq->ub_total_seg_cnt + 10);
1278 
1279 		for (index = 10; index < ha->reg_off->mbox_cnt && index < cnt;
1280 		    index++) {
1281 			mb[index] = RD16_IO_REG(ha, mailbox_out[index]);
1282 		}
1283 
1284 		tq->ub_seq_id = ++ha->ub_seq_id;
1285 		tq->ub_seq_cnt = 0;
1286 		tq->ub_frame_ro = 0;
1287 		tq->ub_loop_id = (uint16_t)(mb[0] == MBA_IP_BROADCAST ?
1288 		    (CFG_IST(ha, CFG_CTRL_24XX) ? BROADCAST_24XX_HDL :
1289 		    IP_BROADCAST_LOOP_ID) : tq->loop_id);
1290 		ha->rcv_dev_q = tq;
1291 
1292 		for (cnt = 10; cnt < ha->reg_off->mbox_cnt &&
1293 		    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
1294 			if (ql_ub_frame_hdr(ha, tq, mb[cnt], done_q) !=
1295 			    QL_SUCCESS) {
1296 				EL(ha, "ql_ub_frame_hdr failed, "
1297 				    "isp_abort_needed\n");
1298 				*set_flags |= ISP_ABORT_NEEDED;
1299 				break;
1300 			}
1301 		}
1302 		break;
1303 
1304 	case MBA_IP_LOW_WATER_MARK:
1305 	case MBA_IP_RCV_BUFFER_EMPTY:
1306 		EL(ha, "%xh IP low water mark / RCV buffer empty received\n",
1307 		    mb[0]);
1308 		*set_flags |= NEED_UNSOLICITED_BUFFERS;
1309 		break;
1310 
1311 	case MBA_IP_HDR_DATA_SPLIT:
1312 		EL(ha, "%xh IP HDR data split received\n", mb[0]);
1313 		break;
1314 
1315 	case MBA_ERROR_LOGGING_DISABLED:
1316 		EL(ha, "%xh error logging disabled received, "
1317 		    "mbx1=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1318 		break;
1319 
1320 	case MBA_POINT_TO_POINT:
1321 	/* case MBA_DCBX_COMPLETED: */
1322 		if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
1323 			EL(ha, "%xh DCBX completed received\n", mb[0]);
1324 		} else {
1325 			EL(ha, "%xh Point to Point Mode received\n", mb[0]);
1326 		}
1327 		ADAPTER_STATE_LOCK(ha);
1328 		ha->flags |= POINT_TO_POINT;
1329 		ADAPTER_STATE_UNLOCK(ha);
1330 		if (!(ha->task_daemon_flags & LOOP_DOWN)) {
1331 			*set_flags |= LOOP_DOWN;
1332 		}
1333 		if (ha->loop_down_timer == LOOP_DOWN_TIMER_OFF) {
1334 			ha->loop_down_timer = LOOP_DOWN_TIMER_START;
1335 		}
1336 		ql_port_state(ha, FC_STATE_OFFLINE,
1337 		    FC_STATE_CHANGE | COMMAND_WAIT_NEEDED | LOOP_DOWN);
1338 		break;
1339 
1340 	case MBA_FCF_CONFIG_ERROR:
1341 		EL(ha, "%xh FCF configuration Error received, mbx1=%xh\n",
1342 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1343 		break;
1344 
1345 	case MBA_DCBX_PARAM_CHANGED:
1346 		EL(ha, "%xh DCBX parameters changed received, mbx1=%xh\n",
1347 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1348 		break;
1349 
1350 	case MBA_CHG_IN_CONNECTION:
1351 		mb[1] = RD16_IO_REG(ha, mailbox_out[1]);
1352 		if (mb[1] == 2) {
1353 			EL(ha, "%xh Change In Connection received, "
1354 			    "mbx1=%xh\n", mb[0], mb[1]);
1355 			ADAPTER_STATE_LOCK(ha);
1356 			ha->flags &= ~POINT_TO_POINT;
1357 			ADAPTER_STATE_UNLOCK(ha);
1358 			if (ha->topology & QL_N_PORT) {
1359 				ha->topology = (uint8_t)(ha->topology &
1360 				    ~QL_N_PORT);
1361 				ha->topology = (uint8_t)(ha->topology |
1362 				    QL_NL_PORT);
1363 			}
1364 		} else {
1365 			EL(ha, "%xh Change In Connection received, "
1366 			    "mbx1=%xh, isp_abort_needed\n", mb[0], mb[1]);
1367 			*set_flags |= ISP_ABORT_NEEDED;
1368 		}
1369 		break;
1370 
1371 	case MBA_ZIO_UPDATE:
1372 		EL(ha, "%xh ZIO response received\n", mb[0]);
1373 
1374 		rsp_q->isp_rsp_index = RD16_IO_REG(ha, resp_in);
1375 		ql_response_pkt(ha, rsp_q, done_q, set_flags, reset_flags);
1376 		break;
1377 
1378 	case MBA_PORT_BYPASS_CHANGED:
1379 		EL(ha, "%xh Port Bypass Changed received, mbx1=%xh\n",
1380 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1381 		/*
1382 		 * Event generated when there is a transition on
1383 		 * port bypass of crystal+.
1384 		 * Mailbox 1:	Bit 0 - External.
1385 		 *		Bit 2 - Internal.
1386 		 * When the bit is 0, the port is bypassed.
1387 		 *
1388 		 * For now we will generate a LIP for all cases.
1389 		 */
1390 		*set_flags |= HANDLE_PORT_BYPASS_CHANGE;
1391 		break;
1392 
1393 	case MBA_RECEIVE_ERROR:
1394 		EL(ha, "%xh Receive Error received, mbx1=%xh, mbx2=%xh\n",
1395 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1396 		    RD16_IO_REG(ha, mailbox_out[2]));
1397 		break;
1398 
1399 	case MBA_LS_RJT_SENT:
1400 		EL(ha, "%xh LS_RJT Response Sent ELS=%xh\n", mb[0],
1401 		    RD16_IO_REG(ha, mailbox_out[1]));
1402 		break;
1403 
1404 	case MBA_FW_RESTART_COMP:
1405 		EL(ha, "%xh firmware restart complete received mb1=%xh\n",
1406 		    mb[0], RD16_IO_REG(ha, mailbox_out[1]));
1407 		break;
1408 
1409 	/*
1410 	 * MBA_IDC_COMPLETE &  MBA_IDC_NOTIFICATION: We won't get another
1411 	 * IDC async event until we ACK the current one.
1412 	 */
1413 	case MBA_IDC_COMPLETE:
1414 		mb[2] = RD16_IO_REG(ha, mailbox_out[2]);
1415 		EL(ha, "%xh MBA_IDC_COMPLETE received, mbx2=%xh\n", mb[0],
1416 		    mb[2]);
1417 		switch (mb[2]) {
1418 		case IDC_OPC_FLASH_ACC:
1419 		case IDC_OPC_RESTART_MPI:
1420 		case IDC_OPC_PORT_RESET_MBC:
1421 		case IDC_OPC_SET_PORT_CONFIG_MBC:
1422 			ADAPTER_STATE_LOCK(ha);
1423 			ha->flags |= IDC_RESTART_NEEDED;
1424 			ADAPTER_STATE_UNLOCK(ha);
1425 			break;
1426 		default:
1427 			EL(ha, "unknown IDC completion opcode=%xh\n", mb[2]);
1428 			break;
1429 		}
1430 		break;
1431 
1432 	case MBA_IDC_NOTIFICATION:
1433 		for (cnt = 1; cnt < 8; cnt++) {
1434 			ha->idc_mb[cnt] = RD16_IO_REG(ha, mailbox_out[cnt]);
1435 		}
1436 		EL(ha, "%xh MBA_IDC_REQ_NOTIFICATION received, mbx1=%xh, "
1437 		    "mbx2=%xh, mbx3=%xh, mbx4=%xh, mbx5=%xh, mbx6=%xh, "
1438 		    "mbx7=%xh\n", mb[0], ha->idc_mb[1], ha->idc_mb[2],
1439 		    ha->idc_mb[3], ha->idc_mb[4], ha->idc_mb[5], ha->idc_mb[6],
1440 		    ha->idc_mb[7]);
1441 
1442 		ADAPTER_STATE_LOCK(ha);
1443 		switch (ha->idc_mb[2]) {
1444 		case IDC_OPC_DRV_START:
1445 			ha->flags |= IDC_RESTART_NEEDED;
1446 			break;
1447 		case IDC_OPC_FLASH_ACC:
1448 		case IDC_OPC_RESTART_MPI:
1449 		case IDC_OPC_PORT_RESET_MBC:
1450 		case IDC_OPC_SET_PORT_CONFIG_MBC:
1451 			ha->flags |= IDC_STALL_NEEDED;
1452 			break;
1453 		default:
1454 			EL(ha, "unknown IDC request opcode=%xh\n",
1455 			    ha->idc_mb[2]);
1456 			break;
1457 		}
1458 		/*
1459 		 * If there is a timeout value associated with this IDC
1460 		 * notification then there is an implied requirement
1461 		 * that we return an ACK.
1462 		 */
1463 		if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
1464 			ha->flags |= IDC_ACK_NEEDED;
1465 		}
1466 		ADAPTER_STATE_UNLOCK(ha);
1467 
1468 		ql_awaken_task_daemon(ha, NULL, 0, 0);
1469 		break;
1470 
1471 	case MBA_IDC_TIME_EXTENDED:
1472 		EL(ha, "%xh MBA_IDC_TIME_EXTENDED received, mbx2=%xh\n",
1473 		    mb[0], RD16_IO_REG(ha, mailbox_out[2]));
1474 		break;
1475 
1476 	default:
1477 		EL(ha, "%xh UNKNOWN event received, mbx1=%xh, mbx2=%xh, "
1478 		    "mbx3=%xh\n", mb[0], RD16_IO_REG(ha, mailbox_out[1]),
1479 		    RD16_IO_REG(ha, mailbox_out[2]),
1480 		    RD16_IO_REG(ha, mailbox_out[3]));
1481 		break;
1482 	}
1483 
1484 	QL_PRINT_3(ha, "done\n");
1485 }
1486 
1487 /*
1488  * ql_fast_fcp_post
1489  *	Fast path for good SCSI I/O completion.
1490  *
1491  * Input:
1492  *	sp:	SRB pointer.
1493  *	rsp_q:	response queue structure pointer.
1494  *
1495  * Context:
1496  *	Interrupt or Kernel context, no mailbox commands allowed.
1497  */
static void
ql_fast_fcp_post(ql_srb_t *sp, ql_response_q_t *rsp_q)
{
	ql_adapter_state_t	*ha = sp->ha;
	ql_lun_t		*lq = sp->lun_queue;
	ql_tgt_t		*tq = lq->target_queue;

	QL_PRINT_3(ha, "started\n");

	/* Acquire device queue lock. */
	DEVICE_QUEUE_LOCK(tq);

	/* Decrement outstanding commands on device. */
	if (tq->outcnt != 0) {
		tq->outcnt--;
	}

	if (sp->flags & SRB_FCP_CMD_PKT) {
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_UNTAGGED) {
			/*
			 * Clear the flag for this LUN so that
			 * untagged commands can be submitted
			 * for it.
			 */
			lq->flags &= ~LQF_UNTAGGED_PENDING;
		}

		/* Decrement the per-LUN outstanding count as well. */
		if (lq->lun_outcnt != 0) {
			lq->lun_outcnt--;
		}
	}

	/* Reset port down retry count on good completion. */
	tq->port_down_retry_count = ha->port_down_retry_count;
	tq->qfull_retry_count = ha->qfull_retry_count;
	ha->pha->timeout_cnt = 0;

	/* Remove command from watchdog queue. */
	if (sp->flags & SRB_WATCHDOG_ENABLED) {
		ql_remove_link(&tq->wdg, &sp->wdg);
		sp->flags &= ~SRB_WATCHDOG_ENABLED;
	}

	if (lq->cmd.first != NULL) {
		/*
		 * More commands are queued on this LUN; kick off the
		 * next one.  NOTE(review): the device queue lock is not
		 * explicitly released on this path, so ql_next() is
		 * presumably responsible for dropping it — confirm.
		 */
		ql_next(ha, lq);
	} else {
		/* Release LU queue specific lock. */
		DEVICE_QUEUE_UNLOCK(tq);
		if (ha->pha->pending_cmds.first != NULL) {
			ql_start_iocb(ha, NULL);
		}
	}

	/* Sync buffers if required.  */
	if (sp->flags & SRB_MS_PKT) {
		(void) ddi_dma_sync(sp->pkt->pkt_resp_dma, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/* Map ISP completion codes. */
	sp->pkt->pkt_expln = FC_EXPLN_NONE;
	sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
	sp->pkt->pkt_state = FC_PKT_SUCCESS;

	(void) qlc_fm_check_pkt_dma_handle(ha, sp);

	/* Now call the pkt completion callback */
	if (sp->flags & SRB_POLL) {
		/* Caller is polling for completion; no callback needed. */
		sp->flags &= ~SRB_POLL;
	} else if (ha->completion_thds == 1 && sp->pkt->pkt_comp &&
	    !(ha->flags & POLL_INTR)) {
		/*
		 * Single completion thread: call the upper-layer
		 * completion routine inline, dropping the interrupt
		 * index lock so the callback can re-enter the driver.
		 */
		INDX_INTR_UNLOCK(ha, rsp_q->rsp_q_number);
		(*sp->pkt->pkt_comp)(sp->pkt);
		INDX_INTR_LOCK(ha, rsp_q->rsp_q_number);
	} else {
		/* Defer completion to the I/O completion path. */
		ql_io_comp(sp);
	}

	/* Report any access-handle fault to the FMA framework. */
	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	QL_PRINT_3(ha, "done\n");
}
1584 
1585 /*
1586  * ql_response_pkt
1587  *	Processes response entry.
1588  *
1589  * Input:
1590  *	ha:		adapter state pointer.
1591  *	rsp_q:		response queue structure pointer.
1592  *	done_q:		head pointer to done queue.
1593  *	set_flags:	task daemon flags to set.
1594  *	reset_flags:	task daemon flags to reset.
1595  *
1596  * Context:
1597  *	Interrupt or Kernel context, no mailbox commands allowed.
1598  */
1599 static void
ql_response_pkt(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)1600 ql_response_pkt(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
1601     ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
1602 {
1603 	response_t	*pkt;
1604 	uint32_t	dma_sync_size_1 = 0;
1605 	uint32_t	dma_sync_size_2 = 0;
1606 	int		status = 0;
1607 
1608 	QL_PRINT_3(ha, "started\n");
1609 
1610 	if (rsp_q->isp_rsp_index >= rsp_q->rsp_entry_cnt) {
1611 		EL(ha, "index error = %xh, isp_abort_needed",
1612 		    rsp_q->isp_rsp_index);
1613 		*set_flags |= ISP_ABORT_NEEDED;
1614 		return;
1615 	}
1616 
1617 	if ((ha->flags & ONLINE) == 0) {
1618 		QL_PRINT_10(ha, "not onlne, done\n");
1619 		return;
1620 	}
1621 
1622 	/* Calculate size of response queue entries to sync. */
1623 	if (rsp_q->isp_rsp_index > rsp_q->rsp_ring_index) {
1624 		dma_sync_size_1 = (uint32_t)
1625 		    ((uint32_t)(rsp_q->isp_rsp_index - rsp_q->rsp_ring_index) *
1626 		    RESPONSE_ENTRY_SIZE);
1627 	} else if (rsp_q->isp_rsp_index == 0) {
1628 		dma_sync_size_1 = (uint32_t)
1629 		    ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1630 		    RESPONSE_ENTRY_SIZE);
1631 	} else {
1632 		/* Responses wrap around the Q */
1633 		dma_sync_size_1 = (uint32_t)
1634 		    ((uint32_t)(rsp_q->rsp_entry_cnt - rsp_q->rsp_ring_index) *
1635 		    RESPONSE_ENTRY_SIZE);
1636 		dma_sync_size_2 = (uint32_t)
1637 		    (rsp_q->isp_rsp_index * RESPONSE_ENTRY_SIZE);
1638 	}
1639 
1640 	/* Sync DMA buffer. */
1641 	(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
1642 	    (off_t)(rsp_q->rsp_ring_index * RESPONSE_ENTRY_SIZE),
1643 	    dma_sync_size_1, DDI_DMA_SYNC_FORCPU);
1644 	if (dma_sync_size_2) {
1645 		(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle, 0,
1646 		    dma_sync_size_2, DDI_DMA_SYNC_FORCPU);
1647 	}
1648 
1649 	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1650 	    != DDI_FM_OK) {
1651 		qlc_fm_report_err_impact(ha,
1652 		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1653 	}
1654 
1655 	while (rsp_q->rsp_ring_index != rsp_q->isp_rsp_index) {
1656 		pkt = rsp_q->rsp_ring_ptr;
1657 
1658 		QL_PRINT_5(ha, "ha->rsp_rg_idx=%xh, mbx[5]=%xh\n",
1659 		    rsp_q->rsp_ring_index, rsp_q->isp_rsp_index);
1660 		QL_DUMP_5((uint8_t *)rsp_q->rsp_ring_ptr, 8,
1661 		    RESPONSE_ENTRY_SIZE);
1662 
1663 		/* Adjust ring index. */
1664 		rsp_q->rsp_ring_index++;
1665 		if (rsp_q->rsp_ring_index == rsp_q->rsp_entry_cnt) {
1666 			rsp_q->rsp_ring_index = 0;
1667 			rsp_q->rsp_ring_ptr = rsp_q->rsp_ring.bp;
1668 		} else {
1669 			rsp_q->rsp_ring_ptr++;
1670 		}
1671 
1672 		/* Process packet. */
1673 		if (rsp_q->status_srb != NULL &&
1674 		    pkt->entry_type != STATUS_CONT_TYPE) {
1675 			ql_add_link_b(done_q, &rsp_q->status_srb->cmd);
1676 			rsp_q->status_srb = NULL;
1677 		}
1678 
1679 		pkt->entry_status = (uint8_t)
1680 		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1681 		    pkt->entry_status & 0x3c : pkt->entry_status & 0x7e);
1682 
1683 		if (pkt->entry_status != 0 ||
1684 		    pkt->entry_type == ABORTED_ENTRY_TYPE) {
1685 			ql_error_entry(ha, rsp_q,
1686 			    pkt, done_q,
1687 			    set_flags, reset_flags);
1688 		} else {
1689 			switch (pkt->entry_type) {
1690 			case STATUS_TYPE:
1691 				status |= CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
1692 				    ql_24xx_status_entry(ha, rsp_q,
1693 				    (sts_24xx_entry_t *)pkt, done_q,
1694 				    set_flags, reset_flags) :
1695 				    ql_status_entry(ha, rsp_q,
1696 				    (sts_entry_t *)pkt,
1697 				    done_q, set_flags, reset_flags);
1698 				break;
1699 			case STATUS_CONT_TYPE:
1700 				ql_status_cont_entry(ha, rsp_q,
1701 				    (sts_cont_entry_t *)pkt, done_q,
1702 				    set_flags, reset_flags);
1703 				break;
1704 			case IP_TYPE:
1705 			case IP_A64_TYPE:
1706 			case IP_CMD_TYPE:
1707 				ql_ip_entry(ha, rsp_q,
1708 				    (ip_entry_t *)pkt, done_q,
1709 				    set_flags, reset_flags);
1710 				break;
1711 			case IP_RECEIVE_TYPE:
1712 				ql_ip_rcv_entry(ha, rsp_q,
1713 				    (ip_rcv_entry_t *)pkt, done_q,
1714 				    set_flags, reset_flags);
1715 				break;
1716 			case IP_RECEIVE_CONT_TYPE:
1717 				ql_ip_rcv_cont_entry(ha, rsp_q,
1718 				    (ip_rcv_cont_entry_t *)pkt,	done_q,
1719 				    set_flags, reset_flags);
1720 				break;
1721 			case IP_24XX_RECEIVE_TYPE:
1722 				ql_ip_24xx_rcv_entry(ha, rsp_q,
1723 				    (ip_rcv_24xx_entry_t *)pkt, done_q,
1724 				    set_flags, reset_flags);
1725 				break;
1726 			case MS_TYPE:
1727 				ql_ms_entry(ha, rsp_q,
1728 				    (ms_entry_t *)pkt, done_q,
1729 				    set_flags, reset_flags);
1730 				break;
1731 			case REPORT_ID_TYPE:
1732 				ql_report_id_entry(ha, rsp_q,
1733 				    (report_id_acq_t *)pkt, done_q,
1734 				    set_flags, reset_flags);
1735 				break;
1736 			case ELS_PASSTHRU_TYPE:
1737 				ql_els_passthru_entry(ha, rsp_q,
1738 				    (els_passthru_entry_rsp_t *)pkt, done_q,
1739 				    set_flags, reset_flags);
1740 				break;
1741 			case IP_BUF_POOL_TYPE:
1742 			case MARKER_TYPE:
1743 			case VP_MODIFY_TYPE:
1744 			case VP_CONTROL_TYPE:
1745 				break;
1746 			default:
1747 				EL(ha, "Unknown IOCB entry type=%xh\n",
1748 				    pkt->entry_type);
1749 				break;
1750 			}
1751 		}
1752 	}
1753 
1754 	/* Inform RISC of processed responses. */
1755 
1756 	if (ha->flags & MULTI_QUEUE) {
1757 		WR32_MBAR_REG(ha, rsp_q->mbar_rsp_out, rsp_q->rsp_ring_index);
1758 	} else {
1759 		WRT16_IO_REG(ha, resp_out, rsp_q->rsp_ring_index);
1760 	}
1761 
1762 	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
1763 	    != DDI_FM_OK) {
1764 		qlc_fm_report_err_impact(ha,
1765 		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
1766 	}
1767 
1768 	/* RESET packet received delay for possible async event. */
1769 	if (status & BIT_0) {
1770 		drv_usecwait(500000);
1771 	}
1772 
1773 	QL_PRINT_3(ha, "done\n");
1774 }
1775 
1776 /*
1777  * ql_error_entry
1778  *	Processes error entry.
1779  *
1780  * Input:
1781  *	ha:		adapter state pointer.
1782  *	rsp_q:		response queue structure pointer.
1783  *	pkt:		entry pointer.
1784  *	done_q:		head pointer to done queue.
1785  *	set_flags:	task daemon flags to set.
1786  *	reset_flags:	task daemon flags to reset.
1787  *
1788  * Context:
1789  *	Interrupt or Kernel context, no mailbox commands allowed.
1790  */
1791 /* ARGSUSED */
static void
ql_error_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, response_t *pkt,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	ql_srb_t	*sp = NULL;
	uint32_t	index, resp_identifier;

	/*
	 * Aborted entries carry no error detail to report; just reclaim
	 * the outstanding command slot (if valid) and return.
	 */
	if (pkt->entry_type == ABORTED_ENTRY_TYPE) {
		resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
		    &pkt->handle);
		index = resp_identifier & OSC_INDEX_MASK;
		if (index < ha->osc_max_cnt) {
			if (ha->outstanding_cmds[index] ==
			    QL_ABORTED_SRB(ha)) {
				EL(ha, "Aborted command sp=QL_ABORTED_SRB, "
				    "handle=%xh\n", resp_identifier);
				ha->outstanding_cmds[index] = NULL;
			} else {
				EL(ha, "Aborted command sp=%ph, handle=%xh\n",
				    (void *) ha->outstanding_cmds[index],
				    resp_identifier);
			}
		} else {
			EL(ha, "Aborted command handle=%xh, out of range "
			    "index=%xh\n", resp_identifier, index);
		}
		return;
	}

	QL_PRINT_2(ha, "started, packet:\n");
	QL_DUMP_2((uint8_t *)pkt, 8, RESPONSE_ENTRY_SIZE);

	/* Decode and log the (masked) entry error status, one bit at a time. */
	if (pkt->entry_status & BIT_6) {
		EL(ha, "Request Queue DMA error\n");
	} else if (pkt->entry_status & BIT_5) {
		EL(ha, "Invalid Entry Order\n");
	} else if (pkt->entry_status & BIT_4) {
		EL(ha, "Invalid Entry Count\n");
	} else if (pkt->entry_status & BIT_3) {
		EL(ha, "Invalid Entry Parameter\n");
	} else if (pkt->entry_status & BIT_2) {
		EL(ha, "Invalid Entry Type\n");
	} else if (pkt->entry_status & BIT_1) {
		EL(ha, "Busy\n");
	} else {
		EL(ha, "UNKNOWN flag = %xh error\n", pkt->entry_status);
	}

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Slot empty: may be a command still being set up. */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Slot held an already-aborted sentinel. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Handle mismatch: firmware/driver confusion. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		/* Bad payload or header */
		if (pkt->entry_status & (BIT_5 + BIT_4 + BIT_3 + BIT_2)) {
			/* Bad payload or header, set error status. */
			sp->pkt->pkt_reason = CS_BAD_PAYLOAD;
		} else if (pkt->entry_status & BIT_1) /* FULL flag */ {
			sp->pkt->pkt_reason = CS_QUEUE_FULL;
		} else {
			/* Set error status. */
			sp->pkt->pkt_reason = CS_UNKNOWN;
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		/* Place command on done queue. */
		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(ha, "done\n");
}
1896 
1897 /*
1898  * ql_status_entry
1899  *	Processes received ISP2200-2300 status entry.
1900  *
1901  * Input:
1902  *	ha:		adapter state pointer.
1903  *	rsp_q:		response queue structure pointer.
1904  *	pkt:		entry pointer.
1905  *	done_q:		done queue pointer.
1906  *	set_flags:	task daemon flags to set.
1907  *	reset_flags:	task daemon flags to reset.
1908  *
1909  * Returns:
1910  *	BIT_0 = CS_RESET status received.
1911  *
1912  * Context:
1913  *	Interrupt or Kernel context, no mailbox commands allowed.
1914  */
1915 /* ARGSUSED */
static int
ql_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t		*sp = NULL;
	uint32_t		index, resp_identifier;
	uint16_t		comp_status;
	int			rval = 0;

	QL_PRINT_3(ha, "started\n");

	/* Validate the response entry handle. */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Slot empty: may be a command still being set up. */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Slot held an already-aborted sentinel. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Handle mismatch: firmware/driver confusion. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/*
		 * We don't care about SCSI QFULLs.
		 */
		if (comp_status == CS_QUEUE_FULL) {
			EL(ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * 2300 firmware marks completion status as data underrun
		 * for scsi qfulls. Make it transport complete.
		 */
		if (CFG_IST(ha, CFG_CTRL_2363) &&
		    comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.
		 */
		if ((pkt->state_flags_h & SF_XFERRED_DATA) == 0 &&
		    comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length == 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    (pkt->state_flags_l & (SF_DATA_OUT | SF_DATA_IN)) ==
		    SF_DATA_OUT) {
			/* Write with no data moved: force a retryable abort. */
			comp_status = CS_ABORTED;
		}

		if (sp->flags & SRB_MS_PKT) {
			/*
			 * Ideally it should never be true. But there
			 * is a bug in FW which upon receiving invalid
			 * parameters in MS IOCB returns it as
			 * status entry and not as ms entry type.
			 */
			ql_ms_entry(ha, rsp_q, (ms_entry_t *)pkt, done_q,
			    set_flags, reset_flags);
			QL_PRINT_3(ha, "ql_ms_entry done\n");
			return (0);
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Slow path: full error/status analysis. */
		rval = ql_status_error(ha, rsp_q, sp, pkt, done_q, set_flags,
		    reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2036 
2037 /*
2038  * ql_24xx_status_entry
2039  *	Processes received ISP24xx status entry.
2040  *
2041  * Input:
2042  *	ha:		adapter state pointer.
2043  *	rsp_q:		response queue structure pointer.
2044  *	pkt:		entry pointer.
2045  *	done_q:		done queue pointer.
2046  *	set_flags:	task daemon flags to set.
2047  *	reset_flags:	task daemon flags to reset.
2048  *
2049  * Returns:
2050  *	BIT_0 = CS_RESET status received.
2051  *
2052  * Context:
2053  *	Interrupt or Kernel context, no mailbox commands allowed.
2054  */
/* ARGSUSED */
static int
ql_24xx_status_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
    sts_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	ql_srb_t		*sp = NULL;
	uint16_t		comp_status;
	uint32_t		index, resp_identifier;
	int			rval = 0;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Validate the response entry handle.  The low bits
	 * (OSC_INDEX_MASK) index the outstanding command array; an
	 * out-of-range index means a corrupt IOCB and forces an abort.
	 */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &pkt->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/*
			 * Slot is empty; the completion may have raced the
			 * request path storing the SRB - poll for it.
			 */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Aborted sentinel: just free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				/* Handle/SRB mismatch - state is suspect. */
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		comp_status = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
		    &pkt->comp_status);

		/* We don't care about SCSI QFULLs. */
		if (comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "CS_QUEUE_FULL, d_id=%xh, lun=%xh\n",
			    sp->lun_queue->target_queue->d_id.b24,
			    sp->lun_queue->lun_no);
			comp_status = CS_COMPLETE;
		}

		/*
		 * Firmware may mark completion status as data underrun
		 * for scsi qfulls (same workaround as the 23xx path).
		 * Make it transport complete.
		 */
		if (comp_status == CS_DATA_UNDERRUN &&
		    pkt->scsi_status_l != STATUS_GOOD) {
			comp_status = CS_COMPLETE;
		}

		/*
		 * Workaround T3 issue where we do not get any data xferred
		 * but get back a good status.  A write that reports a
		 * residual with good status is treated as aborted.
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0 &&
		    pkt->residual_length != 0 &&
		    sp->fcp &&
		    sp->fcp->fcp_data_len != 0 &&
		    sp->fcp->fcp_cntl.cntl_write_data) {
			comp_status = CS_ABORTED;
		}

		/*
		 * Fast path to good SCSI I/O completion
		 */
		if (comp_status == CS_COMPLETE &&
		    pkt->scsi_status_l == STATUS_GOOD &&
		    (pkt->scsi_status_h & FCP_RSP_MASK) == 0) {
			/* Set completed status. */
			sp->flags |= SRB_ISP_COMPLETED;
			sp->pkt->pkt_reason = comp_status;
			ql_fast_fcp_post(sp, rsp_q);
			QL_PRINT_3(ha, "ql_fast_fcp_post done\n");
			return (0);
		}
		/* Anything else goes through the common error path. */
		rval = ql_status_error(ha, rsp_q, sp, (sts_entry_t *)pkt,
		    done_q, set_flags, reset_flags);
	}
	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2158 
2159 /*
2160  * ql_verify_preprocessed_cmd
 *	Handles preprocessed commands.
2162  *
2163  * Input:
2164  *	ha:			adapter state pointer.
2165  *	rsp_q:			response queue structure pointer.
2166  *	pkt_handle:		handle pointer.
2167  *	resp_identifier:	resp_identifier pointer.
2168  *	set_flags:		task daemon flags to set.
2169  *	reset_flags:		task daemon flags to reset.
2170  *
2171  * Returns:
2172  *	srb pointer or NULL
2173  *
2174  * Context:
2175  *	Interrupt or Kernel context, no mailbox commands allowed.
2176  */
2177 /* ARGSUSED */
2178 ql_srb_t *
ql_verify_preprocessed_cmd(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,uint32_t * pkt_handle,uint32_t * resp_identifier,uint64_t * set_flags,uint64_t * reset_flags)2179 ql_verify_preprocessed_cmd(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2180     uint32_t *pkt_handle, uint32_t *resp_identifier, uint64_t *set_flags,
2181     uint64_t *reset_flags)
2182 {
2183 	ql_srb_t		*sp = NULL;
2184 	uint32_t		index;
2185 	uint32_t		get_handle = 10;
2186 
2187 	while (get_handle) {
2188 		/* Get handle. */
2189 		*resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
2190 		    pkt_handle);
2191 		index = *resp_identifier & OSC_INDEX_MASK;
2192 		/* Validate handle. */
2193 		if (index < ha->osc_max_cnt) {
2194 			sp = ha->outstanding_cmds[index];
2195 		}
2196 
2197 		if (sp != NULL) {
2198 			EL(ha, "sp=%xh, resp_id=%xh, get=%d, index=%xh\n", sp,
2199 			    *resp_identifier, get_handle, index);
2200 			break;
2201 		} else {
2202 			get_handle -= 1;
2203 			drv_usecwait(10000);
2204 			if (get_handle == 1 && rsp_q->rsp_ring.dma_handle) {
2205 				/* Last chance, Sync whole DMA buffer. */
2206 				(void) ddi_dma_sync(rsp_q->rsp_ring.dma_handle,
2207 				    0, 0, DDI_DMA_SYNC_FORCPU);
2208 				EL(ha, "last chance DMA sync, index=%xh\n",
2209 				    index);
2210 			}
2211 		}
2212 	}
2213 	QL_PRINT_3(ha, "done\n");
2214 
2215 	return (sp);
2216 }
2217 
2218 
2219 /*
2220  * ql_status_error
2221  *	Processes received ISP status entry error.
2222  *
2223  * Input:
2224  *	ha:		adapter state pointer.
2225  *	rsp_q:		response queue structure pointer.
2226  *	sp:		SRB pointer.
2227  *	pkt:		entry pointer.
2228  *	done_q:		done queue pointer.
2229  *	set_flags:	task daemon flags to set.
2230  *	reset_flags:	task daemon flags to reset.
2231  *
2232  * Returns:
2233  *	BIT_0 = CS_RESET status received.
2234  *
2235  * Context:
2236  *	Interrupt or Kernel context, no mailbox commands allowed.
2237  */
/* ARGSUSED */
static int
ql_status_error(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ql_srb_t *sp,
    sts_entry_t *pkt23, ql_head_t *done_q, uint64_t *set_flags,
    uint64_t *reset_flags)
{
	uint32_t		sense_sz = 0;
	uint32_t		cnt;
	ql_tgt_t		*tq;
	fcp_rsp_t		*fcpr;
	struct fcp_rsp_info	*rsp;
	int			rval = 0;

	/*
	 * Normalized view of the status IOCB, filled in from either the
	 * 24xx (ISP_FW_TYPE_2) or the 2xxx entry layout so the common
	 * processing below is independent of the firmware type.
	 */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	fcp_residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_3(ha, "started\n");

	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/* 24xx and later status entry layout. */
		sts_24xx_entry_t *pkt24 = (sts_24xx_entry_t *)pkt23;

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    rsp_q->rsp_ring.acc_handle, &pkt24->comp_status);
		sts.scsi_status_l = pkt24->scsi_status_l;
		sts.scsi_status_h = pkt24->scsi_status_h;

		/* Setup firmware residuals (only meaningful on underrun). */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt24->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt24->fcp_rsp_residual_count) : 0;

		/*
		 * If firmware and FCP residuals disagree on an underrun,
		 * distrust the RESID_UNDER indication.
		 * NOTE(review): the comparison reads
		 * pkt24->fcp_rsp_residual_count directly rather than via
		 * ddi_get32() as above - confirm this is intentional.
		 */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) &&
		    (sts.residual_length != pkt24->fcp_rsp_residual_count)) {

			EL(sp->ha, "mismatch resid's: fw=%xh, pkt=%xh\n",
			    sts.residual_length,
			    pkt24->fcp_rsp_residual_count);
			sts.scsi_status_h = (uint8_t)
			    (sts.scsi_status_h & ~FCP_RESID_UNDER);
		}

		/*
		 * Setup state flags; augmented below from the original
		 * FCP command since the transfer clearly progressed at
		 * least that far.
		 */
		sts.state_flags_l = pkt24->state_flags_l;
		sts.state_flags_h = pkt24->state_flags_h;

		if (sp->fcp->fcp_data_len &&
		    (sts.comp_status != CS_DATA_UNDERRUN ||
		    sts.residual_length != sp->fcp->fcp_data_len)) {
			/* Not a full underrun: some data moved. */
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_GOT_BUS |
			    SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (sp->fcp->fcp_cntl.cntl_write_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_OUT);
		} else if (sp->fcp->fcp_cntl.cntl_read_data) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_DATA_IN);
		}
		if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_HEAD_OF_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_ORDERED_Q);
		} else if (sp->fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) {
			sts.state_flags_l = (uint8_t)
			    (sts.state_flags_l | SF_SIMPLE_Q);
		}

		/*
		 * Setup FCP response info.  On 24xx, response info and
		 * sense data share the rsp_sense_data[] area: response
		 * info first, sense data immediately after.
		 */
		sts.rsp_info = &pkt24->rsp_sense_data[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get32(
			    rsp_q->rsp_ring.acc_handle,
			    (uint32_t *)&pkt24->fcp_rsp_data_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				/* Clamp to the consumer structure size. */
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
			/* Byte-swap each 4-byte word in place. */
			for (cnt = 0; cnt < sts.rsp_info_length; cnt += 4) {
				ql_chg_endian(sts.rsp_info + cnt, 4);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data (follows the response info). */
		sts.req_sense_data =
		    &pkt24->rsp_sense_data[sts.rsp_info_length];
		if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
			sts.req_sense_length =
			    ddi_get32(rsp_q->rsp_ring.acc_handle,
			    (uint32_t *)&pkt24->fcp_sense_length);
			sts.state_flags_h = (uint8_t)
			    (sts.state_flags_h | SF_ARQ_DONE);
			/* Only swap what fits inside this entry. */
			sense_sz = (uint32_t)
			    (((uintptr_t)pkt24 + sizeof (sts_24xx_entry_t)) -
			    (uintptr_t)sts.req_sense_data);
			for (cnt = 0; cnt < sense_sz; cnt += 4) {
				ql_chg_endian(sts.req_sense_data + cnt, 4);
			}
		} else {
			sts.req_sense_length = 0;
		}
	} else {
		/* 2xxx status entry layout. */

		/* Setup status. */
		sts.comp_status = (uint16_t)ddi_get16(
		    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
		sts.scsi_status_l = pkt23->scsi_status_l;
		sts.scsi_status_h = pkt23->scsi_status_h;

		/* Setup firmware residuals. */
		sts.residual_length = sts.comp_status == CS_DATA_UNDERRUN ?
		    ddi_get32(rsp_q->rsp_ring.acc_handle,
		    (uint32_t *)&pkt23->residual_length) : 0;

		/* Setup FCP residuals. */
		sts.fcp_residual_length = sts.scsi_status_h &
		    (FCP_RESID_UNDER | FCP_RESID_OVER) ?
		    sts.residual_length : 0;

		/* Setup state flags. */
		sts.state_flags_l = pkt23->state_flags_l;
		sts.state_flags_h = pkt23->state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info = &pkt23->rsp_info[0];
		if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) != 0) {
			sts.rsp_info_length = ddi_get16(
			    rsp_q->rsp_ring.acc_handle,
			    (uint16_t *)&pkt23->rsp_info_length);
			if (sts.rsp_info_length >
			    sizeof (struct fcp_rsp_info)) {
				sts.rsp_info_length =
				    sizeof (struct fcp_rsp_info);
			}
		} else {
			sts.rsp_info_length = 0;
		}

		/* Setup sense data. */
		sts.req_sense_data = &pkt23->req_sense_data[0];
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    ddi_get16(rsp_q->rsp_ring.acc_handle,
		    (uint16_t *)&pkt23->req_sense_length) : 0;
	}

	/* Build the FCP response in the transport response buffer. */
	bzero(sp->pkt->pkt_resp, sp->pkt->pkt_rsplen);

	fcpr = (fcp_rsp_t *)sp->pkt->pkt_resp;
	rsp = (struct fcp_rsp_info *)(sp->pkt->pkt_resp +
	    sizeof (fcp_rsp_t));

	tq = sp->lun_queue->target_queue;

	/* Translate the normalized status bits into the FCP response. */
	fcpr->fcp_u.fcp_status.scsi_status = sts.scsi_status_l;
	if (sts.scsi_status_h & FCP_RSP_LEN_VALID) {
		fcpr->fcp_u.fcp_status.rsp_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {
		fcpr->fcp_u.fcp_status.sense_len_set = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_OVER) {
		fcpr->fcp_u.fcp_status.resid_over = 1;
	}
	if (sts.scsi_status_h & FCP_RESID_UNDER) {
		fcpr->fcp_u.fcp_status.resid_under = 1;
	}
	fcpr->fcp_u.fcp_status.reserved_1 = 0;

	/* Set ISP completion status */
	sp->pkt->pkt_reason = sts.comp_status;

	/* Update statistics. */
	if ((sts.scsi_status_h & FCP_RSP_LEN_VALID) &&
	    (sp->pkt->pkt_rsplen > sizeof (fcp_rsp_t))) {

		/* Clamp to the space left in the response buffer. */
		sense_sz = sp->pkt->pkt_rsplen - (uint32_t)sizeof (fcp_rsp_t);
		if (sense_sz > sts.rsp_info_length) {
			sense_sz = sts.rsp_info_length;
		}

		/* copy response information data. */
		if (sense_sz) {
			ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
			    (uint8_t *)rsp, sts.rsp_info, sense_sz,
			    DDI_DEV_AUTOINCR);
		}
		fcpr->fcp_response_len = sense_sz;

		/* Sense data (if any) goes right after the rsp info. */
		rsp = (struct fcp_rsp_info *)((caddr_t)rsp +
		    fcpr->fcp_response_len);

		/* Byte 3 of the response info is the FCP rsp_code. */
		switch (*(sts.rsp_info + 3)) {
		case FCP_NO_FAILURE:
			break;
		case FCP_DL_LEN_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].dl_len_mismatches++;
			break;
		case FCP_CMND_INVALID:
			break;
		case FCP_DATA_RO_MISMATCH:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].data_ro_mismatches++;
			break;
		case FCP_TASK_MGMT_NOT_SUPPTD:
			break;
		case FCP_TASK_MGMT_FAILED:
			ha->adapter_stats->d_stats[lobyte(
			    tq->loop_id)].task_mgmt_failures++;
			break;
		default:
			break;
		}
	} else {
		/*
		 * EL(sp->ha, "scsi_h=%xh, pkt_rsplen=%xh\n",
		 *   sts.scsi_status_h, sp->pkt->pkt_rsplen);
		 */
		fcpr->fcp_response_len = 0;
	}

	/* Set reset status received. */
	if (sts.comp_status == CS_RESET && LOOP_READY(ha)) {
		*set_flags |= MARKER_NEEDED;
		rval |= BIT_0;
	}

	/*
	 * Retry ladder.  Tape devices are never silently retried; for
	 * everything else, pick a retry strategy from the completion
	 * status and the current loop state.
	 */
	if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) ||
	    ha->loop_down_abort_time < LOOP_DOWN_TIMER_START) &&
	    ha->task_daemon_flags & LOOP_DOWN) {
		EL(sp->ha, "Loop Not Ready Retry, d_id=%xh, lun=%xh\n",
		    tq->d_id.b24, sp->lun_queue->lun_no);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    tq->port_down_retry_count != 0 &&
	    (sts.comp_status == CS_INCOMPLETE ||
	    sts.comp_status == CS_PORT_UNAVAILABLE ||
	    sts.comp_status == CS_PORT_LOGGED_OUT ||
	    sts.comp_status == CS_PORT_CONFIG_CHG ||
	    sts.comp_status == CS_PORT_BUSY)) {
		EL(sp->ha, "Port Down Retry=%xh, d_id=%xh, lun=%xh, count=%d"
		    "\n", sts.comp_status, tq->d_id.b24, sp->lun_queue->lun_no,
		    tq->port_down_retry_count);

		/* Set retry status. */
		sp->flags |= SRB_RETRY;

		if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			tq->flags |= TQF_QUEUE_SUSPENDED;

			/* Decrement port down count. */
			if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
				tq->port_down_retry_count--;
			}

			DEVICE_QUEUE_UNLOCK(tq);

			/* Port dropped us - send an explicit LOGO. */
			if ((ha->task_daemon_flags & ABORT_ISP_ACTIVE)
			    == 0 &&
			    (sts.comp_status == CS_PORT_LOGGED_OUT ||
			    sts.comp_status == CS_PORT_UNAVAILABLE)) {
				sp->ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(sp->ha, tq, done_q);
			}

			/* Arm the port retry timer if not already running. */
			ADAPTER_STATE_LOCK(ha);
			if (ha->port_retry_timer == 0) {
				if ((ha->port_retry_timer =
				    ha->port_down_retry_delay) == 0) {
					*set_flags |=
					    PORT_RETRY_NEEDED;
				}
			}
			ADAPTER_STATE_UNLOCK(ha);
		}
	} else if (!(tq->flags & TQF_TAPE_DEVICE) &&
	    (sts.comp_status == CS_RESET ||
	    (sts.comp_status == CS_QUEUE_FULL && tq->qfull_retry_count != 0) ||
	    (sts.comp_status == CS_ABORTED && !(sp->flags & SRB_ABORTING)))) {
		if (sts.comp_status == CS_RESET) {
			EL(sp->ha, "Reset Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);
		} else if (sts.comp_status == CS_QUEUE_FULL) {
			EL(sp->ha, "Queue Full Retry, d_id=%xh, lun=%xh, "
			    "cnt=%d\n", tq->d_id.b24, sp->lun_queue->lun_no,
			    tq->qfull_retry_count);
			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->qfull_retry_count--;

				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->qfull_retry_delay) ==
					    0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}
		} else {
			/* Unsolicited abort: retry the command. */
			EL(sp->ha, "Abort Retry, d_id=%xh, lun=%xh\n",
			    tq->d_id.b24, sp->lun_queue->lun_no);

			if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) && LOOP_READY(ha)) {
				*set_flags |= MARKER_NEEDED;
				rval |= BIT_0;
			}
		}

		/* Set retry status. */
		sp->flags |= SRB_RETRY;
	} else {
		/* No retry: deliver the completion to the transport. */
		fcpr->fcp_resid =
		    sts.fcp_residual_length > sp->fcp->fcp_data_len ?
		    sp->fcp->fcp_data_len : sts.fcp_residual_length;

		/* Underrun reported without RESID_UNDER is suspicious. */
		if ((sts.comp_status == CS_DATA_UNDERRUN) &&
		    (sts.scsi_status_h & FCP_RESID_UNDER) == 0) {

			if (sts.scsi_status_l == STATUS_CHECK) {
				/* Let the check-condition stand on its own. */
				sp->pkt->pkt_reason = CS_COMPLETE;
			} else {
				EL(ha, "transport error - "
				    "underrun & invalid resid\n");
				EL(ha, "ssh=%xh, ssl=%xh\n",
				    sts.scsi_status_h, sts.scsi_status_l);
				sp->pkt->pkt_reason = CS_FCP_RESPONSE_ERROR;
			}
		}

		/* Ignore firmware underrun error. */
		if (sts.comp_status == CS_DATA_UNDERRUN &&
		    (sts.scsi_status_h & FCP_RESID_UNDER ||
		    (sts.scsi_status_l != STATUS_CHECK &&
		    sts.scsi_status_l != STATUS_GOOD))) {
			sp->pkt->pkt_reason = CS_COMPLETE;
		}

		if (sp->pkt->pkt_reason != CS_COMPLETE) {
			ha->xioctl->DeviceErrorCount++;
			EL(sp->ha, "Cmplt status err = %xh, d_id=%xh, lun=%xh,"
			    " pkt_reason=%xh, spf=%xh, sp=%ph\n",
			    sts.comp_status, tq->d_id.b24,
			    sp->lun_queue->lun_no, sp->pkt->pkt_reason,
			    sp->flags, sp);
		}

		/* Set target request sense data. */
		if (sts.scsi_status_l == STATUS_CHECK) {
			if (sts.scsi_status_h & FCP_SNS_LEN_VALID) {

				/*
				 * Sense byte 2 is the sense key; only
				 * real errors count as device errors.
				 */
				if (sp->pkt->pkt_reason == CS_COMPLETE &&
				    sts.req_sense_data[2] != KEY_NO_SENSE &&
				    sts.req_sense_data[2] !=
				    KEY_UNIT_ATTENTION) {
					ha->xioctl->DeviceErrorCount++;
				}

				sense_sz = sts.req_sense_length;

				/* Insure data does not exceed buf. */
				if (sp->pkt->pkt_rsplen <=
				    (uint32_t)sizeof (fcp_rsp_t) +
				    fcpr->fcp_response_len) {
					sp->request_sense_length = 0;
				} else {
					sp->request_sense_length = (uint32_t)
					    (sp->pkt->pkt_rsplen -
					    sizeof (fcp_rsp_t) -
					    fcpr->fcp_response_len);
				}

				if (sense_sz <
				    sp->request_sense_length) {
					sp->request_sense_length =
					    sense_sz;
				}

				sp->request_sense_ptr = (caddr_t)rsp;

				/*
				 * Only the sense bytes that fit in this
				 * entry can be copied now; the remainder
				 * arrives in continuation entries.
				 */
				sense_sz = (uint32_t)
				    (((uintptr_t)pkt23 +
				    sizeof (sts_entry_t)) -
				    (uintptr_t)sts.req_sense_data);
				if (sp->request_sense_length <
				    sense_sz) {
					sense_sz =
					    sp->request_sense_length;
				}

				fcpr->fcp_sense_len = sense_sz;

				/* Move sense data. */
				ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
				    (uint8_t *)sp->request_sense_ptr,
				    sts.req_sense_data,
				    (size_t)sense_sz,
				    DDI_DEV_AUTOINCR);

				sp->request_sense_ptr += sense_sz;
				sp->request_sense_length -= sense_sz;
				/*
				 * More sense to come: park this SRB so
				 * status continuation entries can finish it.
				 */
				if (sp->request_sense_length != 0 &&
				    !(CFG_IST(ha, CFG_CTRL_82XX))) {
					rsp_q->status_srb = sp;
				}
			}

			if (sense_sz != 0) {
				EL(sp->ha, "check condition sense data, "
				    "d_id=%xh, lun=%xh\n%2xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh\n", tq->d_id.b24,
				    sp->lun_queue->lun_no,
				    sts.req_sense_data[0],
				    sts.req_sense_data[1],
				    sts.req_sense_data[2],
				    sts.req_sense_data[3],
				    sts.req_sense_data[4],
				    sts.req_sense_data[5],
				    sts.req_sense_data[6],
				    sts.req_sense_data[7],
				    sts.req_sense_data[8],
				    sts.req_sense_data[9],
				    sts.req_sense_data[10],
				    sts.req_sense_data[11],
				    sts.req_sense_data[12],
				    sts.req_sense_data[13],
				    sts.req_sense_data[14],
				    sts.req_sense_data[15],
				    sts.req_sense_data[16],
				    sts.req_sense_data[17]);
			} else {
				EL(sp->ha, "check condition, d_id=%xh, lun=%xh"
				    "\n", tq->d_id.b24, sp->lun_queue->lun_no);
			}
		}
	}

	/* Set completed status. */
	sp->flags |= SRB_ISP_COMPLETED;

	/*
	 * Place command on done queue - unless continuation entries are
	 * still expected for this SRB (status_srb set above).
	 */
	if (rsp_q->status_srb == NULL) {
		ql_add_link_b(done_q, &sp->cmd);
	}

	QL_PRINT_3(ha, "done\n");

	return (rval);
}
2727 
2728 /*
2729  * ql_status_cont_entry
2730  *	Processes status continuation entry.
2731  *
2732  * Input:
2733  *	ha:		adapter state pointer.
2734  *	rsp_q:		response queue structure pointer.
2735  *	pkt:		entry pointer.
2736  *	done_q:		done queue pointer.
2737  *	set_flags:	task daemon flags to set.
2738  *	reset_flags:	task daemon flags to reset.
2739  *
2740  * Context:
2741  *	Interrupt or Kernel context, no mailbox commands allowed.
2742  */
2743 /* ARGSUSED */
2744 static void
ql_status_cont_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,sts_cont_entry_t * pkt,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)2745 ql_status_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2746     sts_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2747     uint64_t *reset_flags)
2748 {
2749 	uint32_t	sense_sz, index;
2750 	ql_srb_t	*sp = rsp_q->status_srb;
2751 
2752 	QL_PRINT_3(ha, "started\n");
2753 
2754 	if (sp != NULL && sp->request_sense_length) {
2755 		if (sp->request_sense_length > sizeof (pkt->req_sense_data)) {
2756 			sense_sz = sizeof (pkt->req_sense_data);
2757 		} else {
2758 			sense_sz = sp->request_sense_length;
2759 		}
2760 
2761 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
2762 			for (index = 0; index < sense_sz; index += 4) {
2763 				ql_chg_endian((uint8_t *)
2764 				    &pkt->req_sense_data[0] + index, 4);
2765 			}
2766 		}
2767 
2768 		/* Move sense data. */
2769 		ddi_rep_get8(rsp_q->rsp_ring.acc_handle,
2770 		    (uint8_t *)sp->request_sense_ptr,
2771 		    (uint8_t *)&pkt->req_sense_data[0], (size_t)sense_sz,
2772 		    DDI_DEV_AUTOINCR);
2773 
2774 		sp->request_sense_ptr += sense_sz;
2775 		sp->request_sense_length -= sense_sz;
2776 
2777 		/* Place command on done queue. */
2778 		if (sp->request_sense_length == 0) {
2779 			ql_add_link_b(done_q, &sp->cmd);
2780 			rsp_q->status_srb = NULL;
2781 		}
2782 	}
2783 
2784 	QL_PRINT_3(ha, "done\n");
2785 }
2786 
2787 /*
2788  * ql_ip_entry
2789  *	Processes received ISP IP entry.
2790  *
2791  * Input:
2792  *	ha:		adapter state pointer.
2793  *	rsp_q:		response queue structure pointer.
2794  *	pkt:		entry pointer.
2795  *	done_q:		done queue pointer.
2796  *	set_flags:	task daemon flags to set.
2797  *	reset_flags:	task daemon flags to reset.
2798  *
2799  * Context:
2800  *	Interrupt or Kernel context, no mailbox commands allowed.
2801  */
/* ARGSUSED */
static void
ql_ip_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ip_entry_t *pkt23,
    ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
{
	ql_srb_t	*sp = NULL;
	uint32_t	index, resp_identifier;
	ql_tgt_t	*tq;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Validate the response entry handle; the low bits index the
	 * outstanding command array (same scheme as the status entries).
	 */
	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
	    &pkt23->handle);
	index = resp_identifier & OSC_INDEX_MASK;
	if (index < ha->osc_max_cnt) {
		/* the index seems reasonable */
		if ((sp = ha->outstanding_cmds[index]) == NULL) {
			/* Completion may have raced the request path. */
			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
			    (uint32_t *)&pkt23->handle,
			    (uint32_t *)&resp_identifier, set_flags,
			    reset_flags);
		}
		if (sp != NULL) {
			if (sp == QL_ABORTED_SRB(ha)) {
				/* Aborted sentinel: just free the slot. */
				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
				    resp_identifier);
				sp = NULL;
				ha->outstanding_cmds[index] = NULL;
			} else if (sp->handle == resp_identifier) {
				/* Neo, you're the one... */
				ha->outstanding_cmds[index] = NULL;
				sp->handle = 0;
				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
			} else {
				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
				    resp_identifier, sp->handle);
				sp = NULL;
				ql_signal_abort(ha, set_flags);
			}
		}
	} else {
		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
		    index, resp_identifier);
		ql_signal_abort(ha, set_flags);
	}

	if (sp != NULL) {
		tq = sp->lun_queue->target_queue;

		/* Set ISP completion status (layouts differ by chip). */
		if (CFG_IST(ha, CFG_CTRL_24XX)) {
			ip_cmd_entry_t	*pkt24 = (ip_cmd_entry_t *)pkt23;

			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt24->hdl_status);
		} else {
			sp->pkt->pkt_reason = ddi_get16(
			    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
		}

		if (ha->task_daemon_flags & LOOP_DOWN) {
			EL(ha, "Loop Not Ready Retry, d_id=%xh\n",
			    tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

		} else if (tq->port_down_retry_count &&
		    (sp->pkt->pkt_reason == CS_INCOMPLETE ||
		    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
		    sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
			EL(ha, "Port Down Retry=%xh, d_id=%xh, count=%d\n",
			    sp->pkt->pkt_reason, tq->d_id.b24,
			    tq->port_down_retry_count);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;

			/* Port dropped us - send an explicit LOGO. */
			if (sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT ||
			    sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE) {
				ha->adapter_stats->d_stats[lobyte(
				    tq->loop_id)].logouts_recvd++;
				ql_send_logo(ha, tq, done_q);
			}

			/* Acquire device queue lock. */
			DEVICE_QUEUE_LOCK(tq);

			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
				tq->flags |= TQF_QUEUE_SUSPENDED;

				tq->port_down_retry_count--;

				/* Arm the port retry timer if idle. */
				ADAPTER_STATE_LOCK(ha);
				if (ha->port_retry_timer == 0) {
					if ((ha->port_retry_timer =
					    ha->port_down_retry_delay) == 0) {
						*set_flags |=
						    PORT_RETRY_NEEDED;
					}
				}
				ADAPTER_STATE_UNLOCK(ha);
			}

			/* Release device queue specific lock. */
			DEVICE_QUEUE_UNLOCK(tq);

		} else if (sp->pkt->pkt_reason == CS_RESET) {
			EL(ha, "Reset Retry, d_id=%xh\n", tq->d_id.b24);

			/* Set retry status. */
			sp->flags |= SRB_RETRY;
		} else {
			if (sp->pkt->pkt_reason != CS_COMPLETE) {
				EL(ha, "Cmplt status err=%xh, d_id=%xh\n",
				    sp->pkt->pkt_reason, tq->d_id.b24);
			}
		}

		/* Set completed status. */
		sp->flags |= SRB_ISP_COMPLETED;

		ql_add_link_b(done_q, &sp->cmd);

	}
	QL_PRINT_3(ha, "done\n");
}
2932 
2933 /*
2934  * ql_ip_rcv_entry
2935  *	Processes received ISP IP buffers entry.
2936  *
2937  * Input:
2938  *	ha:		adapter state pointer.
2939  *	rsp_q:		response queue structure pointer.
2940  *	pkt:		entry pointer.
2941  *	done_q:		done queue pointer.
2942  *	set_flags:	task daemon flags to set.
2943  *	reset_flags:	task daemon flags to reset.
2944  *
2945  * Context:
2946  *	Interrupt or Kernel context, no mailbox commands allowed.
2947  */
2948 /* ARGSUSED */
2949 static void
ql_ip_rcv_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,ip_rcv_entry_t * pkt,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)2950 ql_ip_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
2951     ip_rcv_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
2952     uint64_t *reset_flags)
2953 {
2954 	port_id_t	s_id;
2955 	uint16_t	index;
2956 	uint8_t		cnt;
2957 	ql_tgt_t	*tq;
2958 
2959 	QL_PRINT_3(ha, "started\n");
2960 
2961 	/* Locate device queue. */
2962 	s_id.b.al_pa = pkt->s_id[0];
2963 	s_id.b.area = pkt->s_id[1];
2964 	s_id.b.domain = pkt->s_id[2];
2965 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
2966 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
2967 		return;
2968 	}
2969 
2970 	tq->ub_sequence_length = (uint16_t)ddi_get16(
2971 	    rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
2972 	tq->ub_total_seg_cnt = pkt->segment_count;
2973 	tq->ub_seq_id = ++ha->ub_seq_id;
2974 	tq->ub_seq_cnt = 0;
2975 	tq->ub_frame_ro = 0;
2976 	tq->ub_loop_id = pkt->loop_id;
2977 	ha->rcv_dev_q = tq;
2978 
2979 	for (cnt = 0; cnt < IP_RCVBUF_HANDLES && tq->ub_seq_cnt <
2980 	    tq->ub_total_seg_cnt; cnt++) {
2981 
2982 		index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
2983 		    &pkt->buffer_handle[cnt]);
2984 
2985 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
2986 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
2987 			*set_flags |= ISP_ABORT_NEEDED;
2988 			break;
2989 		}
2990 	}
2991 
2992 	QL_PRINT_3(ha, "done\n");
2993 }
2994 
2995 /*
2996  * ql_ip_rcv_cont_entry
2997  *	Processes received ISP IP buffers continuation entry.
2998  *
2999  * Input:
3000  *	ha:		adapter state pointer.
3001  *	rsp_q:		response queue structure pointer.
3002  *	pkt:		entry pointer.
3003  *	done_q:		done queue pointer.
3004  *	set_flags:	task daemon flags to set.
3005  *	reset_flags:	task daemon flags to reset.
3006  *
3007  * Context:
3008  *	Interrupt or Kernel context, no mailbox commands allowed.
3009  */
3010 /* ARGSUSED */
3011 static void
ql_ip_rcv_cont_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,ip_rcv_cont_entry_t * pkt,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)3012 ql_ip_rcv_cont_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3013     ip_rcv_cont_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3014     uint64_t *reset_flags)
3015 {
3016 	uint16_t	index;
3017 	uint8_t		cnt;
3018 	ql_tgt_t	*tq;
3019 
3020 	QL_PRINT_3(ha, "started\n");
3021 
3022 	if ((tq = ha->rcv_dev_q) == NULL) {
3023 		EL(ha, "No IP receive device\n");
3024 		return;
3025 	}
3026 
3027 	for (cnt = 0; cnt < IP_RCVBUF_CONT_HANDLES &&
3028 	    tq->ub_seq_cnt < tq->ub_total_seg_cnt; cnt++) {
3029 
3030 		index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3031 		    &pkt->buffer_handle[cnt]);
3032 
3033 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3034 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3035 			*set_flags |= ISP_ABORT_NEEDED;
3036 			break;
3037 		}
3038 	}
3039 
3040 	QL_PRINT_3(ha, "done\n");
3041 }
3042 
3043 /*
 * ql_ip_24xx_rcv_entry
3045  *	Processes received ISP24xx IP buffers entry.
3046  *
3047  * Input:
3048  *	ha:		adapter state pointer.
3049  *	rsp_q:		response queue structure pointer.
3050  *	pkt:		entry pointer.
3051  *	done_q:		done queue pointer.
3052  *	set_flags:	task daemon flags to set.
3053  *	reset_flags:	task daemon flags to reset.
3054  *
3055  * Context:
3056  *	Interrupt or Kernel context, no mailbox commands allowed.
3057  */
3058 /* ARGSUSED */
3059 static void
ql_ip_24xx_rcv_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,ip_rcv_24xx_entry_t * pkt,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)3060 ql_ip_24xx_rcv_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3061     ip_rcv_24xx_entry_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3062     uint64_t *reset_flags)
3063 {
3064 	port_id_t	s_id;
3065 	uint16_t	index;
3066 	uint8_t		cnt;
3067 	ql_tgt_t	*tq;
3068 
3069 	QL_PRINT_3(ha, "started\n");
3070 
3071 	/* Locate device queue. */
3072 	s_id.b.al_pa = pkt->s_id[0];
3073 	s_id.b.area = pkt->s_id[1];
3074 	s_id.b.domain = pkt->s_id[2];
3075 	if ((tq = ql_d_id_to_queue(ha, s_id)) == NULL) {
3076 		EL(ha, "Unknown IP device ID=%xh\n", s_id.b24);
3077 		return;
3078 	}
3079 
3080 	if (tq->ub_total_seg_cnt == 0) {
3081 		tq->ub_sequence_length = (uint16_t)ddi_get16(
3082 		    rsp_q->rsp_ring.acc_handle, &pkt->seq_length);
3083 		tq->ub_total_seg_cnt = pkt->segment_count;
3084 		tq->ub_seq_id = ++ha->ub_seq_id;
3085 		tq->ub_seq_cnt = 0;
3086 		tq->ub_frame_ro = 0;
3087 		tq->ub_loop_id = (uint16_t)ddi_get16(
3088 		    rsp_q->rsp_ring.acc_handle, &pkt->n_port_hdl);
3089 	}
3090 
3091 	for (cnt = 0; cnt < IP_24XX_RCVBUF_HANDLES && tq->ub_seq_cnt <
3092 	    tq->ub_total_seg_cnt; cnt++) {
3093 
3094 		index = (uint16_t)ddi_get16(rsp_q->rsp_ring.acc_handle,
3095 		    &pkt->buffer_handle[cnt]);
3096 
3097 		if (ql_ub_frame_hdr(ha, tq, index, done_q) != QL_SUCCESS) {
3098 			EL(ha, "ql_ub_frame_hdr failed, isp_abort_needed\n");
3099 			*set_flags |= ISP_ABORT_NEEDED;
3100 			break;
3101 		}
3102 	}
3103 
3104 	QL_PRINT_3(ha, "done\n");
3105 }
3106 
3107 /*
3108  * ql_ms_entry
3109  *	Processes received Name/Management/CT Pass-Through entry.
3110  *
3111  * Input:
3112  *	ha:		adapter state pointer.
3113  *	rsp_q:		response queue structure pointer.
3114  *	pkt23:		entry pointer.
3115  *	done_q:		done queue pointer.
3116  *	set_flags:	task daemon flags to set.
3117  *	reset_flags:	task daemon flags to reset.
3118  *
3119  * Context:
3120  *	Interrupt or Kernel context, no mailbox commands allowed.
3121  */
3122 /* ARGSUSED */
3123 static void
ql_ms_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,ms_entry_t * pkt23,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)3124 ql_ms_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q, ms_entry_t *pkt23,
3125     ql_head_t *done_q, uint64_t *set_flags, uint64_t *reset_flags)
3126 {
3127 	ql_srb_t		*sp = NULL;
3128 	uint32_t		index, cnt, resp_identifier;
3129 	ql_tgt_t		*tq;
3130 	ct_passthru_entry_t	*pkt24 = (ct_passthru_entry_t *)pkt23;
3131 
3132 	QL_PRINT_3(ha, "started\n");
3133 
3134 	/* Validate the response entry handle. */
3135 	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle,
3136 	    &pkt23->handle);
3137 	index = resp_identifier & OSC_INDEX_MASK;
3138 	if (index < ha->osc_max_cnt) {
3139 		/* the index seems reasonable */
3140 		if ((sp = ha->outstanding_cmds[index]) == NULL) {
3141 			sp = ql_verify_preprocessed_cmd(ha, rsp_q,
3142 			    (uint32_t *)&pkt23->handle,
3143 			    (uint32_t *)&resp_identifier, set_flags,
3144 			    reset_flags);
3145 		}
3146 		if (sp != NULL) {
3147 			if (sp == QL_ABORTED_SRB(ha)) {
3148 				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3149 				    resp_identifier);
3150 				sp = NULL;
3151 				ha->outstanding_cmds[index] = NULL;
3152 			} else if (sp->handle == resp_identifier) {
3153 				/* Neo, you're the one... */
3154 				ha->outstanding_cmds[index] = NULL;
3155 				sp->handle = 0;
3156 				sp->flags &= ~SRB_IN_TOKEN_ARRAY;
3157 			} else {
3158 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3159 				    resp_identifier, sp->handle);
3160 				sp = NULL;
3161 				ql_signal_abort(ha, set_flags);
3162 			}
3163 		}
3164 	} else {
3165 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3166 		    index, resp_identifier);
3167 		ql_signal_abort(ha, set_flags);
3168 	}
3169 
3170 	if (sp != NULL) {
3171 		if (!(sp->flags & SRB_MS_PKT)) {
3172 			EL(ha, "Not SRB_MS_PKT flags=%xh, isp_abort_needed",
3173 			    sp->flags);
3174 			*set_flags |= ISP_ABORT_NEEDED;
3175 			return;
3176 		}
3177 
3178 		tq = sp->lun_queue->target_queue;
3179 
3180 		/* Set ISP completion status */
3181 		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3182 			sp->pkt->pkt_reason = ddi_get16(
3183 			    rsp_q->rsp_ring.acc_handle, &pkt24->status);
3184 		} else {
3185 			sp->pkt->pkt_reason = ddi_get16(
3186 			    rsp_q->rsp_ring.acc_handle, &pkt23->comp_status);
3187 		}
3188 
3189 		if (sp->pkt->pkt_reason == CS_RESOUCE_UNAVAILABLE &&
3190 		    sp->retry_count) {
3191 			EL(ha, "Resouce Unavailable Retry = %d\n",
3192 			    sp->retry_count);
3193 
3194 			/* Set retry status. */
3195 			sp->retry_count--;
3196 			sp->flags |= SRB_RETRY;
3197 
3198 			/* Acquire device queue lock. */
3199 			DEVICE_QUEUE_LOCK(tq);
3200 
3201 			if (!(tq->flags & TQF_QUEUE_SUSPENDED)) {
3202 				tq->flags |= TQF_QUEUE_SUSPENDED;
3203 
3204 				ADAPTER_STATE_LOCK(ha);
3205 				if (ha->port_retry_timer == 0) {
3206 					ha->port_retry_timer = 2;
3207 				}
3208 				ADAPTER_STATE_UNLOCK(ha);
3209 			}
3210 
3211 			/* Release device queue specific lock. */
3212 			DEVICE_QUEUE_UNLOCK(tq);
3213 
3214 		} else if (tq->port_down_retry_count &&
3215 		    (sp->pkt->pkt_reason == CS_PORT_CONFIG_CHG ||
3216 		    sp->pkt->pkt_reason == CS_PORT_BUSY)) {
3217 			EL(ha, "Port Down Retry\n");
3218 
3219 			/* Set retry status. */
3220 			sp->flags |= SRB_RETRY;
3221 
3222 			/* Acquire device queue lock. */
3223 			DEVICE_QUEUE_LOCK(tq);
3224 
3225 			if ((tq->flags & TQF_QUEUE_SUSPENDED) == 0) {
3226 				tq->flags |= TQF_QUEUE_SUSPENDED;
3227 
3228 				tq->port_down_retry_count--;
3229 
3230 				ADAPTER_STATE_LOCK(ha);
3231 				if (ha->port_retry_timer == 0) {
3232 					if ((ha->port_retry_timer =
3233 					    ha->port_down_retry_delay) == 0) {
3234 						*set_flags |=
3235 						    PORT_RETRY_NEEDED;
3236 					}
3237 				}
3238 				ADAPTER_STATE_UNLOCK(ha);
3239 			}
3240 			/* Release device queue specific lock. */
3241 			DEVICE_QUEUE_UNLOCK(tq);
3242 
3243 		} else if (sp->pkt->pkt_reason == CS_RESET) {
3244 			EL(ha, "Reset Retry\n");
3245 
3246 			/* Set retry status. */
3247 			sp->flags |= SRB_RETRY;
3248 
3249 		} else if (CFG_IST(ha, CFG_ISP_FW_TYPE_2) &&
3250 		    sp->pkt->pkt_reason == CS_DATA_UNDERRUN) {
3251 			cnt = ddi_get32(rsp_q->rsp_ring.acc_handle,
3252 			    &pkt24->resp_byte_count);
3253 			if (cnt < sizeof (fc_ct_header_t)) {
3254 				EL(ha, "Data underrun\n");
3255 			} else {
3256 				sp->pkt->pkt_reason = CS_COMPLETE;
3257 			}
3258 
3259 		} else if (sp->pkt->pkt_reason == CS_PORT_UNAVAILABLE ||
3260 		    sp->pkt->pkt_reason == CS_PORT_LOGGED_OUT) {
3261 			EL(ha, "Port unavailable %xh\n", sp->pkt->pkt_reason);
3262 			DEVICE_QUEUE_LOCK(tq);
3263 			tq->flags |= TQF_LOGIN_NEEDED;
3264 			DEVICE_QUEUE_UNLOCK(tq);
3265 			sp->pkt->pkt_reason = CS_TIMEOUT;
3266 
3267 		} else if (sp->pkt->pkt_reason != CS_COMPLETE) {
3268 			EL(ha, "status err=%xh\n", sp->pkt->pkt_reason);
3269 		}
3270 
3271 		if (sp->pkt->pkt_reason == CS_COMPLETE) {
3272 			/*EMPTY*/
3273 			QL_PRINT_3(ha, "ct_cmdrsp=%x%02xh resp\n",
3274 			    sp->pkt->pkt_cmd[8], sp->pkt->pkt_cmd[9]);
3275 			QL_DUMP_3(sp->pkt->pkt_resp, 8, sp->pkt->pkt_rsplen);
3276 		}
3277 
3278 		/* For nameserver restore command, management change header. */
3279 		if ((sp->flags & SRB_RETRY) == 0) {
3280 			tq->d_id.b24 == FS_NAME_SERVER ?
3281 			    ql_cthdr_endian(sp->pkt->pkt_cmd_acc,
3282 			    sp->pkt->pkt_cmd, B_TRUE) :
3283 			    ql_cthdr_endian(sp->pkt->pkt_resp_acc,
3284 			    sp->pkt->pkt_resp, B_TRUE);
3285 		}
3286 
3287 		/* Set completed status. */
3288 		sp->flags |= SRB_ISP_COMPLETED;
3289 
3290 		/* Place command on done queue. */
3291 		ql_add_link_b(done_q, &sp->cmd);
3292 
3293 	}
3294 	QL_PRINT_3(ha, "done\n");
3295 }
3296 
3297 /*
3298  * ql_report_id_entry
 *	Processes received Report ID Acquisition entry.
3300  *
3301  * Input:
3302  *	ha:		adapter state pointer.
3303  *	rsp_q:		response queue structure pointer.
3304  *	pkt:		entry pointer.
3305  *	done_q:		done queue pointer.
3306  *	set_flags:	task daemon flags to set.
3307  *	reset_flags:	task daemon flags to reset.
3308  *
3309  * Context:
3310  *	Interrupt or Kernel context, no mailbox commands allowed.
3311  */
3312 /* ARGSUSED */
3313 static void
ql_report_id_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,report_id_acq_t * pkt,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)3314 ql_report_id_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3315     report_id_acq_t *pkt, ql_head_t *done_q, uint64_t *set_flags,
3316     uint64_t *reset_flags)
3317 {
3318 	ql_adapter_state_t	*vha;
3319 
3320 	QL_PRINT_3(ha, "started\n");
3321 
3322 	EL(ha, "format=%d, index=%d, status=%d\n",
3323 	    pkt->format, pkt->vp_index, pkt->vp_status);
3324 
3325 	if (pkt->format == 1) {
3326 		/* Locate port state structure. */
3327 		for (vha = ha; vha != NULL; vha = vha->vp_next) {
3328 			if (vha->vp_index == pkt->vp_index) {
3329 				break;
3330 			}
3331 		}
3332 		if (vha != NULL) {
3333 			if (pkt->vp_status == CS_COMPLETE ||
3334 			    pkt->vp_status == CS_PORT_ID_CHANGE) {
3335 				if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3336 					vha->fcoe_fcf_idx = pkt->fcf_index;
3337 				}
3338 				if (vha->vp_index != 0) {
3339 					*set_flags |= LOOP_RESYNC_NEEDED;
3340 					*reset_flags &= ~LOOP_RESYNC_NEEDED;
3341 					vha->loop_down_timer =
3342 					    LOOP_DOWN_TIMER_OFF;
3343 					TASK_DAEMON_LOCK(ha);
3344 					vha->task_daemon_flags |=
3345 					    LOOP_RESYNC_NEEDED;
3346 					vha->task_daemon_flags &= ~LOOP_DOWN;
3347 					TASK_DAEMON_UNLOCK(ha);
3348 				}
3349 				ADAPTER_STATE_LOCK(ha);
3350 				vha->flags &= ~VP_ID_NOT_ACQUIRED;
3351 				ADAPTER_STATE_UNLOCK(ha);
3352 			} else {
3353 				/* FA-WWPN failure. */
3354 				if (pkt->vp_status == CS_INCOMPLETE &&
3355 				    pkt->ls_rjt_reason_code == 0xff &&
3356 				    pkt->ls_rjt_explanation == 0x44) {
3357 					*set_flags |= ISP_ABORT_NEEDED;
3358 				}
3359 				if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
3360 					EL(ha, "sts sc=%d, rjt_rea=%xh, "
3361 					    "rjt_exp=%xh, rjt_sc=%xh\n",
3362 					    pkt->status_subcode,
3363 					    pkt->ls_rjt_reason_code,
3364 					    pkt->ls_rjt_explanation,
3365 					    pkt->ls_rjt_subcode);
3366 				}
3367 				ADAPTER_STATE_LOCK(ha);
3368 				vha->flags |= VP_ID_NOT_ACQUIRED;
3369 				ADAPTER_STATE_UNLOCK(ha);
3370 			}
3371 		}
3372 	}
3373 
3374 	QL_PRINT_3(ha, "done\n");
3375 }
3376 
3377 /*
 * ql_els_passthru_entry
3379  *	Processes received ELS Pass-Through entry.
3380  *
3381  * Input:
3382  *	ha:		adapter state pointer.
3383  *	rsp_q:		response queue structure pointer.
3384  *	pkt23:		entry pointer.
3385  *	done_q:		done queue pointer.
3386  *	set_flags:	task daemon flags to set.
3387  *	reset_flags:	task daemon flags to reset.
3388  *
3389  * Context:
3390  *	Interrupt or Kernel context, no mailbox commands allowed.
3391  */
3392 /* ARGSUSED */
3393 static void
ql_els_passthru_entry(ql_adapter_state_t * ha,ql_response_q_t * rsp_q,els_passthru_entry_rsp_t * rsp,ql_head_t * done_q,uint64_t * set_flags,uint64_t * reset_flags)3394 ql_els_passthru_entry(ql_adapter_state_t *ha, ql_response_q_t *rsp_q,
3395     els_passthru_entry_rsp_t *rsp, ql_head_t *done_q, uint64_t *set_flags,
3396     uint64_t *reset_flags)
3397 {
3398 	ql_tgt_t	*tq;
3399 	port_id_t	s_id;
3400 	ql_srb_t	*srb = NULL;
3401 	uint32_t	index, resp_identifier;
3402 
3403 	QL_PRINT_3(ha, "started\n");
3404 
3405 	/* Validate the response entry handle. */
3406 	resp_identifier = ddi_get32(rsp_q->rsp_ring.acc_handle, &rsp->handle);
3407 	index = resp_identifier & OSC_INDEX_MASK;
3408 	if (index < ha->osc_max_cnt) {
3409 		/* the index seems reasonable */
3410 		if ((srb = ha->outstanding_cmds[index]) == NULL) {
3411 			srb = ql_verify_preprocessed_cmd(ha, rsp_q,
3412 			    (uint32_t *)&rsp->handle,
3413 			    (uint32_t *)&resp_identifier, set_flags,
3414 			    reset_flags);
3415 		}
3416 		if (srb != NULL) {
3417 			if (srb == QL_ABORTED_SRB(ha)) {
3418 				EL(ha, "QL_ABORTED_SRB handle=%xh\n",
3419 				    resp_identifier);
3420 				srb = NULL;
3421 				ha->outstanding_cmds[index] = NULL;
3422 			} else if (srb->handle == resp_identifier) {
3423 				/* Neo, you're the one... */
3424 				ha->outstanding_cmds[index] = NULL;
3425 				srb->handle = 0;
3426 				srb->flags &= ~SRB_IN_TOKEN_ARRAY;
3427 			} else {
3428 				EL(ha, "IOCB handle mismatch pkt=%xh, sp=%xh\n",
3429 				    resp_identifier, srb->handle);
3430 				srb = NULL;
3431 				ql_signal_abort(ha, set_flags);
3432 			}
3433 		}
3434 	} else {
3435 		EL(ha, "osc index out of range, index=%xh, handle=%xh\n",
3436 		    index, resp_identifier);
3437 		ql_signal_abort(ha, set_flags);
3438 	}
3439 
3440 	if (srb != NULL) {
3441 		if (!(srb->flags & SRB_ELS_PKT)) {
3442 			EL(ha, "Not SRB_ELS_PKT flags=%xh, isp_abort_needed\n",
3443 			    srb->flags);
3444 			*set_flags |= ISP_ABORT_NEEDED;
3445 			return;
3446 		}
3447 
3448 		(void) ddi_dma_sync(srb->pkt->pkt_resp_dma, 0, 0,
3449 		    DDI_DMA_SYNC_FORKERNEL);
3450 
3451 		/* Set ISP completion status */
3452 		srb->pkt->pkt_reason = ddi_get16(rsp_q->rsp_ring.acc_handle,
3453 		    &rsp->comp_status);
3454 
3455 		if (srb->pkt->pkt_reason != CS_COMPLETE) {
3456 			la_els_rjt_t	rjt;
3457 
3458 			EL(ha, "srb=%ph,status err=%xh\n",
3459 			    srb, srb->pkt->pkt_reason);
3460 
3461 			if (srb->pkt->pkt_reason == CS_LOGIN_LOGOUT_ERROR) {
3462 				EL(ha, "e1=%xh e2=%xh\n",
3463 				    rsp->error_subcode1, rsp->error_subcode2);
3464 			}
3465 
3466 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3467 
3468 			/* Build RJT in the response. */
3469 			rjt.ls_code.ls_code = LA_ELS_RJT;
3470 			rjt.reason = FC_REASON_NO_CONNECTION;
3471 
3472 			ddi_rep_put8(srb->pkt->pkt_resp_acc, (uint8_t *)&rjt,
3473 			    (uint8_t *)srb->pkt->pkt_resp,
3474 			    sizeof (rjt), DDI_DEV_AUTOINCR);
3475 
3476 			srb->pkt->pkt_state = FC_PKT_TRAN_ERROR;
3477 			srb->pkt->pkt_reason = FC_REASON_NO_CONNECTION;
3478 		}
3479 
3480 		if (srb->pkt->pkt_reason == CS_COMPLETE) {
3481 			uint8_t		opcode;
3482 			uint16_t	loop_id;
3483 
3484 			/* Indicate ISP completion */
3485 			srb->flags |= SRB_ISP_COMPLETED;
3486 
3487 			loop_id = ddi_get16(rsp_q->rsp_ring.acc_handle,
3488 			    &rsp->n_port_hdl);
3489 
3490 			/* tq is obtained from lun_queue */
3491 			tq = srb->lun_queue->target_queue;
3492 
3493 			if (ha->topology & QL_N_PORT) {
3494 				/* on plogi success assume the chosen s_id */
3495 				opcode = ddi_get8(rsp_q->rsp_ring.acc_handle,
3496 				    &rsp->els_cmd_opcode);
3497 
3498 				EL(ha, "els opcode=%x srb=%ph,pkt=%ph, tq=%ph"
3499 				    ", portid=%xh, tqlpid=%xh, loop_id=%xh\n",
3500 				    opcode, srb, srb->pkt, tq, tq->d_id.b24,
3501 				    tq->loop_id, loop_id);
3502 
3503 				if (opcode == LA_ELS_PLOGI) {
3504 					s_id.b.al_pa = rsp->s_id_7_0;
3505 					s_id.b.area = rsp->s_id_15_8;
3506 					s_id.b.domain = rsp->s_id_23_16;
3507 
3508 					ha->d_id.b24 = s_id.b24;
3509 					EL(ha, "Set port's source ID %xh\n",
3510 					    ha->d_id.b24);
3511 				}
3512 			}
3513 			ql_isp_els_handle_rsp_endian(ha, srb);
3514 
3515 			if (ha != srb->ha) {
3516 				EL(ha, "ha=%x srb->ha=%x\n", ha, srb->ha);
3517 			}
3518 
3519 			if (tq != NULL) {
3520 				tq->logout_sent = 0;
3521 				tq->flags &= ~TQF_NEED_AUTHENTICATION;
3522 
3523 				if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
3524 					tq->flags |= TQF_IIDMA_NEEDED;
3525 				}
3526 				srb->pkt->pkt_state = FC_PKT_SUCCESS;
3527 			}
3528 		}
3529 
3530 		/* Remove command from watchdog queue. */
3531 		if (srb->flags & SRB_WATCHDOG_ENABLED) {
3532 			tq = srb->lun_queue->target_queue;
3533 
3534 			DEVICE_QUEUE_LOCK(tq);
3535 			ql_remove_link(&tq->wdg, &srb->wdg);
3536 			srb->flags &= ~SRB_WATCHDOG_ENABLED;
3537 			DEVICE_QUEUE_UNLOCK(tq);
3538 		}
3539 
3540 		/* invoke the callback */
3541 		ql_io_comp(srb);
3542 	}
3543 	QL_PRINT_3(ha, "done\n");
3544 }
3545 
3546 /*
3547  * ql_signal_abort
3548  *	Signal to the task daemon that a condition warranting an
3549  *	isp reset has been detected.
3550  *
3551  * Input:
3552  *	ha:		adapter state pointer.
3553  *	set_flags:	task daemon flags to set.
3554  *
3555  * Context:
3556  *	Interrupt or Kernel context, no mailbox commands allowed.
3557  */
3558 static void
ql_signal_abort(ql_adapter_state_t * ha,uint64_t * set_flags)3559 ql_signal_abort(ql_adapter_state_t *ha, uint64_t *set_flags)
3560 {
3561 	if (!CFG_IST(ha, CFG_CTRL_82XX) &&
3562 	    !(ha->task_daemon_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE))) {
3563 		*set_flags |= ISP_ABORT_NEEDED;
3564 	}
3565 }
3566