1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #include <emlxs.h>
28 
29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 EMLXS_MSG_DEF(EMLXS_FCP_C);
31 
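/*
 * Note: EMLXS_GET_VADDR expects a loop index named 'i' to be in scope at
 * the call site. It returns the MATCHMAP whose address pair was saved by
 * emlxs_mem_map_vaddr() for the BDE in icmd->un.cont64[i].
 */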
32 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
33 	getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
34 
35 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
36     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
37 static uint32_t	emlxs_iotag_flush(emlxs_hba_t *hba);
38 
/*
 * This routine copies cnt bytes of data from src to dest one 32-bit word
 * at a time, potentially swapping each word to big endian (PCIMEM_LONG).
 * cnt must be a multiple of sizeof(uint32_t).
 */
43 extern void
44 emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
45 {
46 	uint32_t ldata;
47 	int32_t i;
48 
49 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
50 		ldata = *src++;
51 		ldata = PCIMEM_LONG(ldata);
52 		*dest++ = ldata;
53 	}
54 }  /* emlxs_pcimem_bcopy */
55 
56 
/*
 * This routine copies cnt bytes of data from src to dest one 32-bit word
 * at a time, swapping each word to big endian (SWAP_DATA32). cnt must be
 * a multiple of sizeof(uint32_t).
 */
61 extern void
62 emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
63 {
64 	uint32_t ldata;
65 	int32_t i;
66 
67 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
68 		ldata = *src++;
69 		ldata = SWAP_DATA32(ldata);
70 		*dest++ = ldata;
71 	}
}  /* emlxs_swap_bcopy */
73 
74 
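/*
 * SCSI opcodes referenced by emlxs_handle_fcp_event() below when sizing
 * the expected data length for FCP underrun checks.
 */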
75 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
76 #define	SCSI_INQUIRY			0x12
77 #define	SCSI_RX_DIAG    		0x1C
78 
79 
80 /*
81  *  emlxs_handle_fcp_event
82  *
83  *  Description: Process an FCP Rsp Ring completion
84  *
85  */
86 /* ARGSUSED */
87 extern void
88 emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
89 {
90 	emlxs_port_t *port = &PPORT;
91 	IOCB *cmd;
92 	emlxs_buf_t *sbp;
93 	fc_packet_t *pkt = NULL;
94 #ifdef SAN_DIAG_SUPPORT
95 	NODELIST *ndlp;
96 #endif
97 	uint32_t iostat;
98 	uint8_t localstat;
99 	fcp_rsp_t *rsp;
100 	uint32_t rsp_data_resid;
101 	uint32_t check_underrun;
102 	uint8_t asc;
103 	uint8_t ascq;
104 	uint8_t scsi_status;
105 	uint8_t sense;
106 	uint32_t did;
107 	uint32_t fix_it;
108 	uint8_t *scsi_cmd;
109 	uint8_t scsi_opcode;
110 	uint16_t scsi_dl;
111 	uint32_t data_rx;
112 
113 	cmd = &iocbq->iocb;
114 
115 	/* Initialize the status */
116 	iostat = cmd->ulpStatus;
117 	localstat = 0;
118 	scsi_status = 0;
119 	asc = 0;
120 	ascq = 0;
121 	sense = 0;
122 	check_underrun = 0;
	fix_it = 0;
	scsi_dl = 0;
	rsp_data_resid = 0;
124 
125 	HBASTATS.FcpEvent++;
126 
127 	sbp = (emlxs_buf_t *)iocbq->sbp;
128 
129 	if (!sbp) {
130 		/* completion with missing xmit command */
131 		HBASTATS.FcpStray++;
132 
133 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
134 		    "cmd=%x iotag=%x", cmd->ulpCommand, cmd->ulpIoTag);
135 
136 		return;
137 	}
138 
139 	HBASTATS.FcpCompleted++;
140 
141 #ifdef SAN_DIAG_SUPPORT
142 	emlxs_update_sd_bucket(sbp);
143 #endif /* SAN_DIAG_SUPPORT */
144 
145 	pkt = PRIV2PKT(sbp);
146 
147 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
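	/*
	 * pkt_cmd holds the FCP_CMND payload; the 8-byte LUN and 4 bytes of
	 * task control precede the CDB, so the SCSI opcode is at offset 12.
	 */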
148 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
149 	scsi_opcode = scsi_cmd[12];
150 	data_rx = 0;
151 
152 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
153 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
154 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
155 		    DDI_DMA_SYNC_FORKERNEL);
156 
157 #ifdef TEST_SUPPORT
158 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
159 		    (pkt->pkt_datalen >= 512)) {
160 			hba->underrun_counter--;
161 			iostat = IOSTAT_FCP_RSP_ERROR;
162 
163 			/* Report 512 bytes missing by adapter */
164 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
165 
166 			/* Corrupt 512 bytes of Data buffer */
167 			bzero((uint8_t *)pkt->pkt_data, 512);
168 
169 			/* Set FCP response to STATUS_GOOD */
170 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
171 		}
172 #endif /* TEST_SUPPORT */
173 	}
174 
175 	/* Process the pkt */
176 	mutex_enter(&sbp->mtx);
177 
178 	/* Check for immediate return */
179 	if ((iostat == IOSTAT_SUCCESS) &&
180 	    (pkt->pkt_comp) &&
181 	    !(sbp->pkt_flags &
182 	    (PACKET_RETURNED | PACKET_COMPLETED |
183 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
184 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
185 	    PACKET_IN_ABORT | PACKET_POLLED))) {
186 		HBASTATS.FcpGood++;
187 
188 		sbp->pkt_flags |=
189 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
190 		    PACKET_COMPLETED | PACKET_RETURNED);
191 		mutex_exit(&sbp->mtx);
192 
193 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
194 		emlxs_unswap_pkt(sbp);
195 #endif /* EMLXS_MODREV2X */
196 
197 		(*pkt->pkt_comp) (pkt);
198 
199 		return;
200 	}
201 
202 	/*
203 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
204 	 * is reported.
205 	 */
206 
207 	/* Check if a response buffer was provided */
208 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
209 		emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
210 		    DDI_DMA_SYNC_FORKERNEL);
211 
212 		/* Get the response buffer pointer */
213 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
214 
215 		/* Set the valid response flag */
216 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
217 
218 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
219 
220 #ifdef SAN_DIAG_SUPPORT
221 		ndlp = (NODELIST *)iocbq->node;
222 		if (scsi_status == SCSI_STAT_QUE_FULL) {
223 			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
224 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
225 		} else if (scsi_status == SCSI_STAT_BUSY) {
226 			emlxs_log_sd_scsi_event(port,
227 			    SD_SCSI_SUBCATEGORY_DEVBSY,
228 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
229 		}
230 #endif
231 
232 		/*
233 		 * Convert a task abort to a check condition with no data
234 		 * transferred. We saw a data corruption when Solaris received
235 		 * a Task Abort from a tape.
236 		 */
237 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
238 			EMLXS_MSGF(EMLXS_CONTEXT,
239 			    &emlxs_fcp_completion_error_msg,
240 			    "Task Abort. "
241 			    "Fixed.did=0x%06x sbp=%p cmd=%02x dl=%d",
242 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
243 
244 			rsp->fcp_u.fcp_status.scsi_status =
245 			    SCSI_STAT_CHECK_COND;
246 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
247 			rsp->fcp_u.fcp_status.sense_len_set = 0;
248 			rsp->fcp_u.fcp_status.resid_over = 0;
249 
250 			if (pkt->pkt_datalen) {
251 				rsp->fcp_u.fcp_status.resid_under = 1;
252 				rsp->fcp_resid =
253 				    SWAP_DATA32(pkt->pkt_datalen);
254 			} else {
255 				rsp->fcp_u.fcp_status.resid_under = 0;
256 				rsp->fcp_resid = 0;
257 			}
258 
259 			scsi_status = SCSI_STAT_CHECK_COND;
260 		}
261 
262 		/*
263 		 * We only need to check underrun if data could
264 		 * have been sent
265 		 */
266 
267 		/* Always check underrun if status is good */
268 		if (scsi_status == SCSI_STAT_GOOD) {
269 			check_underrun = 1;
270 		}
271 		/* Check the sense codes if this is a check condition */
272 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
273 			check_underrun = 1;
274 
275 			/* Check if sense data was provided */
276 			if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
277 				sense = *((uint8_t *)rsp + 32 + 2);
278 				asc = *((uint8_t *)rsp + 32 + 12);
279 				ascq = *((uint8_t *)rsp + 32 + 13);
280 			}
281 
282 #ifdef SAN_DIAG_SUPPORT
283 			emlxs_log_sd_scsi_check_event(port,
284 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
285 			    scsi_opcode, sense, asc, ascq);
286 #endif
287 		}
288 		/* Status is not good and this is not a check condition */
289 		/* No data should have been sent */
290 		else {
291 			check_underrun = 0;
292 		}
293 
294 		/* Get the residual underrun count reported by the SCSI reply */
295 		rsp_data_resid = (pkt->pkt_datalen &&
296 		    rsp->fcp_u.fcp_status.resid_under) ? SWAP_DATA32(rsp->
297 		    fcp_resid) : 0;
298 
299 		/* Set the pkt resp_resid field */
300 		pkt->pkt_resp_resid = 0;
301 
302 		/* Set the pkt data_resid field */
303 		if (pkt->pkt_datalen &&
304 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
305 			/*
306 			 * Get the residual underrun count reported by
307 			 * our adapter
308 			 */
309 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
310 
311 #ifdef SAN_DIAG_SUPPORT
312 			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
313 				emlxs_log_sd_fc_rdchk_event(port,
314 				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
315 				    scsi_opcode, pkt->pkt_data_resid);
316 			}
317 #endif
318 
319 			/* Get the actual amount of data transferred */
320 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
321 
322 			/*
323 			 * If the residual being reported by the adapter is
324 			 * greater than the residual being reported in the
325 			 * reply, then we have a true underrun.
326 			 */
327 			if (check_underrun &&
328 			    (pkt->pkt_data_resid > rsp_data_resid)) {
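				/*
				 * Determine how much data the command
				 * actually requested (scsi_dl) so the
				 * underrun can be classified below.
				 */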
329 				switch (scsi_opcode) {
330 				case SCSI_INQUIRY:
331 					scsi_dl = scsi_cmd[16];
332 					break;
333 
334 				case SCSI_RX_DIAG:
335 					scsi_dl =
336 					    (scsi_cmd[15] * 0x100) +
337 					    scsi_cmd[16];
338 					break;
339 
340 				default:
341 					scsi_dl = pkt->pkt_datalen;
342 				}
343 
344 #ifdef FCP_UNDERRUN_PATCH1
345 				/*
346 				 * If status is not good and no data was
347 				 * actually transferred, then we must fix
348 				 * the issue
349 				 */
350 				if ((scsi_status != SCSI_STAT_GOOD) &&
351 				    (data_rx == 0)) {
352 					fix_it = 1;
353 
354 					EMLXS_MSGF(EMLXS_CONTEXT,
355 					    &emlxs_fcp_completion_error_msg,
356 					    "Underrun(1). Fixed. "
357 					    "did=0x%06x sbp=%p cmd=%02x "
358 					    "dl=%d,%d rx=%d rsp=%d",
359 					    did, sbp, scsi_opcode,
360 					    pkt->pkt_datalen, scsi_dl,
361 					    (pkt->pkt_datalen -
362 					    cmd->un.fcpi.fcpi_parm),
363 					    rsp_data_resid);
364 
365 				}
366 #endif /* FCP_UNDERRUN_PATCH1 */
367 
368 
369 #ifdef FCP_UNDERRUN_PATCH2
				if (scsi_status == SCSI_STAT_GOOD) {
371 					emlxs_msg_t	*msg;
372 
373 					msg = &emlxs_fcp_completion_error_msg;
					/*
					 * If status is good, this is an
					 * inquiry request, and the amount
					 * of data requested <= data
					 * received, then we must fix the
					 * issue.
					 */
383 
384 					if ((scsi_opcode == SCSI_INQUIRY) &&
385 					    (pkt->pkt_datalen >= data_rx) &&
386 					    (scsi_dl <= data_rx)) {
387 						fix_it = 1;
388 
389 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
390 						    "Underrun(2). Fixed. "
391 						    "did=0x%06x sbp=%p "
392 						    "cmd=%02x dl=%d,%d "
393 						    "rx=%d rsp=%d",
394 						    did, sbp, scsi_opcode,
395 						    pkt->pkt_datalen, scsi_dl,
396 						    data_rx, rsp_data_resid);
397 
398 					}
399 
400 					/*
401 					 * If status is good and this is an
402 					 * inquiry request and the amount of
403 					 * data requested >= 128 bytes, but
404 					 * only 128 bytes were received,
405 					 * then we must fix the issue.
406 					 */
407 					else if ((scsi_opcode ==
408 					    SCSI_INQUIRY) &&
409 					    (pkt->pkt_datalen >= 128) &&
410 					    (scsi_dl >= 128) &&
411 					    (data_rx == 128)) {
412 						fix_it = 1;
413 
414 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
415 						    "Underrun(3). Fixed. "
416 						    "did=0x%06x sbp=%p "
417 						    "cmd=%02x dl=%d,%d "
418 						    "rx=%d rsp=%d",
419 						    did, sbp, scsi_opcode,
420 						    pkt->pkt_datalen, scsi_dl,
421 						    data_rx, rsp_data_resid);
422 
423 					}
424 
425 				}
426 #endif /* FCP_UNDERRUN_PATCH2 */
427 
428 				/*
429 				 * Check if SCSI response payload should be
430 				 * fixed or if a DATA_UNDERRUN should be
431 				 * reported
432 				 */
433 				if (fix_it) {
434 					/*
435 					 * Fix the SCSI response payload itself
436 					 */
437 					rsp->fcp_u.fcp_status.resid_under = 1;
438 					rsp->fcp_resid =
439 					    SWAP_DATA32(pkt->pkt_data_resid);
440 				} else {
441 					/*
442 					 * Change the status from
443 					 * IOSTAT_FCP_RSP_ERROR to
444 					 * IOSTAT_DATA_UNDERRUN
445 					 */
446 					iostat = IOSTAT_DATA_UNDERRUN;
447 					pkt->pkt_data_resid =
448 					    pkt->pkt_datalen;
449 				}
450 			}
451 
452 			/*
453 			 * If the residual being reported by the adapter is
454 			 * less than the residual being reported in the reply,
			 * then we have a true overrun. Since we don't know
			 * where the extra data came from or went to, we
			 * cannot trust anything we received.
458 			 */
459 			else if (rsp_data_resid > pkt->pkt_data_resid) {
460 				/*
461 				 * Change the status from
462 				 * IOSTAT_FCP_RSP_ERROR to
463 				 * IOSTAT_DATA_OVERRUN
464 				 */
465 				iostat = IOSTAT_DATA_OVERRUN;
466 				pkt->pkt_data_resid = pkt->pkt_datalen;
467 			}
468 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
469 
470 			/* Report whatever the target reported */
471 			pkt->pkt_data_resid = rsp_data_resid;
472 		}
473 	}
474 
475 	/*
476 	 * If pkt is tagged for timeout then set the return codes
477 	 * appropriately
478 	 */
479 	if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
480 		iostat = IOSTAT_LOCAL_REJECT;
481 		localstat = IOERR_ABORT_TIMEOUT;
482 		goto done;
483 	}
484 
	/*
	 * If pkt is tagged for abort then set the return codes
	 * appropriately
	 */
486 	if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
487 		iostat = IOSTAT_LOCAL_REJECT;
488 		localstat = IOERR_ABORT_REQUESTED;
489 		goto done;
490 	}
491 
492 	/* Print completion message */
493 	switch (iostat) {
494 	case IOSTAT_SUCCESS:
495 		/* Build SCSI GOOD status */
496 		if (pkt->pkt_rsplen) {
497 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
498 		}
499 		break;
500 
501 	case IOSTAT_FCP_RSP_ERROR:
502 		break;
503 
504 	case IOSTAT_REMOTE_STOP:
505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
506 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
507 		    scsi_opcode);
508 		break;
509 
510 	case IOSTAT_LOCAL_REJECT:
511 		localstat = cmd->un.grsp.perr.statLocalError;
512 
513 		switch (localstat) {
514 		case IOERR_SEQUENCE_TIMEOUT:
515 			EMLXS_MSGF(EMLXS_CONTEXT,
516 			    &emlxs_fcp_completion_error_msg,
517 			    "Local reject. "
518 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
519 			    emlxs_error_xlate(localstat), did, sbp,
520 			    scsi_opcode, pkt->pkt_timeout);
521 			break;
522 
523 		default:
524 			EMLXS_MSGF(EMLXS_CONTEXT,
525 			    &emlxs_fcp_completion_error_msg,
526 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
527 			    emlxs_error_xlate(localstat), did, sbp,
528 			    scsi_opcode);
529 		}
530 
531 		break;
532 
533 	case IOSTAT_NPORT_RJT:
534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
535 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
536 		    scsi_opcode);
537 		break;
538 
539 	case IOSTAT_FABRIC_RJT:
540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
541 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
542 		    scsi_opcode);
543 		break;
544 
545 	case IOSTAT_NPORT_BSY:
546 #ifdef SAN_DIAG_SUPPORT
547 		ndlp = (NODELIST *)iocbq->node;
548 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
549 #endif
550 
551 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
552 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
553 		    scsi_opcode);
554 		break;
555 
556 	case IOSTAT_FABRIC_BSY:
557 #ifdef SAN_DIAG_SUPPORT
558 		ndlp = (NODELIST *)iocbq->node;
559 		emlxs_log_sd_fc_bsy_event(port, NULL);
560 #endif
561 
562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
563 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
564 		    scsi_opcode);
565 		break;
566 
567 	case IOSTAT_INTERMED_RSP:
568 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
569 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
570 		    sbp, scsi_opcode);
571 		break;
572 
573 	case IOSTAT_LS_RJT:
574 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
575 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
576 		    scsi_opcode);
577 		break;
578 
579 	case IOSTAT_DATA_UNDERRUN:
580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
581 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
582 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
583 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
584 		    rsp_data_resid, scsi_status, sense, asc, ascq);
585 		break;
586 
587 	case IOSTAT_DATA_OVERRUN:
588 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
589 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
590 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
591 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
592 		    rsp_data_resid, scsi_status, sense, asc, ascq);
593 		break;
594 
595 	default:
596 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
597 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
598 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
599 		    scsi_opcode);
600 		break;
601 	}
602 
603 done:
604 
605 	if (iostat == IOSTAT_SUCCESS) {
606 		HBASTATS.FcpGood++;
607 	} else {
608 		HBASTATS.FcpError++;
609 	}
610 
611 	mutex_exit(&sbp->mtx);
612 
613 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
614 
615 	return;
616 
617 }  /* emlxs_handle_fcp_event() */
618 
619 
620 
/*
 *  emlxs_post_buffer
 *
 *  This routine posts count buffers to the ring using
 *  CMD_QUE_RING_BUF64_CN commands. Up to 2 buffers may
 *  be posted per command.
 *  Returns the number of buffers NOT posted.
 */
629 extern int
630 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
631 {
632 	emlxs_port_t *port = &PPORT;
633 	IOCB *icmd;
634 	IOCBQ *iocbq;
635 	MATCHMAP *mp;
636 	uint16_t tag;
637 	uint32_t maxqbuf;
638 	int32_t i;
639 	int32_t j;
640 	uint32_t seg;
641 	uint32_t size;
642 
643 	mp = 0;
644 	maxqbuf = 2;
645 	tag = (uint16_t)cnt;
646 	cnt += rp->fc_missbufcnt;
647 
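	/*
	 * Select the buffer pool and buffer size based on the ring being
	 * serviced; rings without unsolicited buffer pools are ignored.
	 */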
648 	if (rp->ringno == FC_ELS_RING) {
649 		seg = MEM_BUF;
650 		size = MEM_ELSBUF_SIZE;
651 	} else if (rp->ringno == FC_IP_RING) {
652 		seg = MEM_IPBUF;
653 		size = MEM_IPBUF_SIZE;
654 	} else if (rp->ringno == FC_CT_RING) {
655 		seg = MEM_CTBUF;
656 		size = MEM_CTBUF_SIZE;
657 	}
658 #ifdef SFCT_SUPPORT
659 	else if (rp->ringno == FC_FCT_RING) {
660 		seg = MEM_FCTBUF;
661 		size = MEM_FCTBUF_SIZE;
662 	}
663 #endif /* SFCT_SUPPORT */
664 	else {
665 		return (0);
666 	}
667 
668 	/*
669 	 * While there are buffers to post
670 	 */
671 	while (cnt) {
672 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
673 			rp->fc_missbufcnt = cnt;
674 			return (cnt);
675 		}
676 
677 		iocbq->ring = (void *)rp;
678 		iocbq->port = (void *)port;
679 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
680 
681 		icmd = &iocbq->iocb;
682 
683 		/*
684 		 * Max buffers can be posted per command
685 		 */
686 		for (i = 0; i < maxqbuf; i++) {
687 			if (cnt <= 0)
688 				break;
689 
690 			/* fill in BDEs for command */
691 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
692 				icmd->ulpBdeCount = i;
693 				for (j = 0; j < i; j++) {
694 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
695 					if (mp) {
696 						(void) emlxs_mem_put(hba, seg,
697 						    (uint8_t *)mp);
698 					}
699 				}
700 
701 				rp->fc_missbufcnt = cnt + i;
702 
703 				(void) emlxs_mem_put(hba, MEM_IOCB,
704 				    (uint8_t *)iocbq);
705 
706 				return (cnt + i);
707 			}
708 
709 			/*
710 			 * map that page and save the address pair for lookup
711 			 * later
712 			 */
713 			emlxs_mem_map_vaddr(hba,
714 			    rp,
715 			    mp,
716 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
717 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
718 
719 			icmd->un.cont64[i].tus.f.bdeSize = size;
720 			icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
721 
722 			/*
723 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
724 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
725 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
726 			 *    icmd->un.cont64[i].addrLow, size);
727 			 */
728 
729 			cnt--;
730 		}
731 
732 		icmd->ulpIoTag = tag;
733 		icmd->ulpBdeCount = i;
734 		icmd->ulpLe = 1;
735 		icmd->ulpOwner = OWN_CHIP;
736 		/* used for delimiter between commands */
737 		iocbq->bp = (uint8_t *)mp;
738 
739 		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
740 	}
741 
742 	rp->fc_missbufcnt = 0;
743 
744 	return (0);
745 
746 }  /* emlxs_post_buffer() */
747 
748 
749 extern int
750 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
751 {
752 	emlxs_hba_t *hba = HBA;
753 	emlxs_config_t *cfg;
754 	NODELIST *nlp;
755 	fc_affected_id_t *aid;
756 	uint32_t mask;
757 	uint32_t aff_d_id;
758 	uint32_t linkdown;
759 	uint32_t vlinkdown;
760 	uint32_t action;
761 	int i;
762 	uint32_t unreg_vpi;
763 	uint32_t update;
764 	uint32_t adisc_support;
765 
766 	/* Target mode only uses this routine for linkdowns */
767 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
768 		return (0);
769 	}
770 
771 	cfg = &CFG;
772 	aid = (fc_affected_id_t *)&scope;
773 	linkdown = 0;
774 	vlinkdown = 0;
775 	unreg_vpi = 0;
776 	update = 0;
777 
778 	if (!(port->flag & EMLXS_PORT_BOUND)) {
779 		return (0);
780 	}
781 
782 	switch (aid->aff_format) {
783 	case 0:	/* Port */
784 		mask = 0x00ffffff;
785 		break;
786 
787 	case 1:	/* Area */
788 		mask = 0x00ffff00;
789 		break;
790 
791 	case 2:	/* Domain */
792 		mask = 0x00ff0000;
793 		break;
794 
795 	case 3:	/* Network */
796 		mask = 0x00000000;
797 		break;
798 
799 #ifdef DHCHAP_SUPPORT
800 	case 0xfe:	/* Virtual link down */
801 		mask = 0x00000000;
802 		vlinkdown = 1;
803 		break;
804 #endif /* DHCHAP_SUPPORT */
805 
806 	case 0xff:	/* link is down */
807 		mask = 0x00000000;
808 		linkdown = 1;
809 		break;
810 
811 	}
812 
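	/*
	 * Only the portion of the D_ID selected by the scope format above
	 * is significant when matching nodes below.
	 */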
813 	aff_d_id = aid->aff_d_id & mask;
814 
815 
	/*
	 * If the link is down, then this is a hard shutdown and flush.
	 * If the link is not down, then this is a soft shutdown and flush
	 * (e.g. RSCN).
	 */
821 	if (linkdown) {
822 		mutex_enter(&EMLXS_PORT_LOCK);
823 
824 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
825 		port->prev_did = port->did;
826 		port->did = 0;
827 
828 		if (port->ulp_statec != FC_STATE_OFFLINE) {
829 			port->ulp_statec = FC_STATE_OFFLINE;
830 			update = 1;
831 		}
832 
833 		mutex_exit(&EMLXS_PORT_LOCK);
834 
835 		/* Tell ULP about it */
836 		if (update) {
837 			if (port->flag & EMLXS_PORT_BOUND) {
838 				if (port->vpi == 0) {
839 					EMLXS_MSGF(EMLXS_CONTEXT,
840 					    &emlxs_link_down_msg, NULL);
841 				}
842 
843 				if (port->tgt_mode) {
844 #ifdef SFCT_SUPPORT
845 					emlxs_fct_link_down(port);
846 #endif /* SFCT_SUPPORT */
847 
848 				} else if (port->ini_mode) {
849 					port->ulp_statec_cb(port->ulp_handle,
850 					    FC_STATE_OFFLINE);
851 				}
852 			} else {
853 				if (port->vpi == 0) {
854 					EMLXS_MSGF(EMLXS_CONTEXT,
855 					    &emlxs_link_down_msg, "*");
856 				}
857 			}
858 
859 
860 		}
861 
862 		unreg_vpi = 1;
863 
864 #ifdef DHCHAP_SUPPORT
865 		/* Stop authentication with all nodes */
866 		emlxs_dhc_auth_stop(port, NULL);
867 #endif /* DHCHAP_SUPPORT */
868 
869 		/* Flush the base node */
870 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
871 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
872 
873 		/* Flush any pending ub buffers */
874 		emlxs_ub_flush(port);
875 	}
876 #ifdef DHCHAP_SUPPORT
877 	/* virtual link down */
878 	else if (vlinkdown) {
879 		mutex_enter(&EMLXS_PORT_LOCK);
880 
881 		if (port->ulp_statec != FC_STATE_OFFLINE) {
882 			port->ulp_statec = FC_STATE_OFFLINE;
883 			update = 1;
884 		}
885 
886 		mutex_exit(&EMLXS_PORT_LOCK);
887 
888 		/* Tell ULP about it */
889 		if (update) {
890 			if (port->flag & EMLXS_PORT_BOUND) {
891 				if (port->vpi == 0) {
892 					EMLXS_MSGF(EMLXS_CONTEXT,
893 					    &emlxs_link_down_msg,
894 					    "Switch authentication failed.");
895 				}
896 
897 #ifdef SFCT_SUPPORT
898 				if (port->tgt_mode) {
899 					emlxs_fct_link_down(port);
900 
901 				} else if (port->ini_mode) {
902 					port->ulp_statec_cb(port->ulp_handle,
903 					    FC_STATE_OFFLINE);
904 				}
905 #else
906 				port->ulp_statec_cb(port->ulp_handle,
907 				    FC_STATE_OFFLINE);
908 #endif	/* SFCT_SUPPORT */
909 			} else {
910 				if (port->vpi == 0) {
911 					EMLXS_MSGF(EMLXS_CONTEXT,
912 					    &emlxs_link_down_msg,
913 					    "Switch authentication failed. *");
914 				}
915 			}
916 
917 
918 		}
919 
920 		/* Flush the base node */
921 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
922 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
923 	}
924 #endif /* DHCHAP_SUPPORT */
925 
926 	if (port->tgt_mode) {
927 		goto done;
928 	}
929 
930 	/* Set the node tags */
931 	/* We will process all nodes with this tag */
932 	rw_enter(&port->node_rwlock, RW_READER);
933 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
934 		nlp = port->node_table[i];
935 		while (nlp != NULL) {
936 			nlp->nlp_tag = 1;
937 			nlp = nlp->nlp_list_next;
938 		}
939 	}
940 	rw_exit(&port->node_rwlock);
941 
942 	if (hba->flag & FC_ONLINE_MODE) {
943 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
944 	} else {
945 		adisc_support = 0;
946 	}
947 
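	/*
	 * In the loops below, action 1 unregisters the node (link down),
	 * action 2 closes the node and flushes all of its queues (RSCN),
	 * and action 3 closes the node but leaves the FCP ring queues
	 * intact so FCP target I/O can resume after recovery.
	 */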
948 	/* Check ADISC support level */
949 	switch (adisc_support) {
950 	case 0:	/* No support - Flush all IO to all matching nodes */
951 
952 		for (;;) {
953 			/*
954 			 * We need to hold the locks this way because
955 			 * emlxs_mb_unreg_did and the flush routines enter the
956 			 * same locks. Also, when we release the lock the list
957 			 * can change out from under us.
958 			 */
959 
960 			/* Find first node */
961 			rw_enter(&port->node_rwlock, RW_READER);
962 			action = 0;
963 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
964 				nlp = port->node_table[i];
965 				while (nlp != NULL) {
966 					if (!nlp->nlp_tag) {
967 						nlp = nlp->nlp_list_next;
968 						continue;
969 					}
970 					nlp->nlp_tag = 0;
971 
972 					/*
973 					 * Check for any device that matches
974 					 * our mask
975 					 */
976 					if ((nlp->nlp_DID & mask) == aff_d_id) {
977 						if (linkdown) {
978 							action = 1;
979 							break;
						} else { /* Must be an RSCN */
981 
982 							action = 2;
983 							break;
984 						}
985 					}
986 					nlp = nlp->nlp_list_next;
987 				}
988 
989 				if (action) {
990 					break;
991 				}
992 			}
993 			rw_exit(&port->node_rwlock);
994 
995 
996 			/* Check if nothing was found */
997 			if (action == 0) {
998 				break;
999 			} else if (action == 1) {
1000 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1001 				    NULL, NULL, NULL);
1002 			} else if (action == 2) {
1003 #ifdef DHCHAP_SUPPORT
1004 				emlxs_dhc_auth_stop(port, nlp);
1005 #endif /* DHCHAP_SUPPORT */
1006 
1007 				/*
1008 				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
1010 				 */
1011 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1012 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1013 
1014 				/* Flush tx queue */
1015 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1016 
1017 				/* Flush chip queue */
1018 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1019 			}
1020 
1021 		}
1022 
1023 		break;
1024 
1025 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
1026 
1027 		for (;;) {
1028 
1029 			/*
1030 			 * We need to hold the locks this way because
1031 			 * emlxs_mb_unreg_did and the flush routines enter the
1032 			 * same locks. Also, when we release the lock the list
1033 			 * can change out from under us.
1034 			 */
1035 			rw_enter(&port->node_rwlock, RW_READER);
1036 			action = 0;
1037 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1038 				nlp = port->node_table[i];
1039 				while (nlp != NULL) {
1040 					if (!nlp->nlp_tag) {
1041 						nlp = nlp->nlp_list_next;
1042 						continue;
1043 					}
1044 					nlp->nlp_tag = 0;
1045 
1046 					/*
1047 					 * Check for special FCP2 target device
1048 					 * that matches our mask
1049 					 */
1050 					if ((nlp->nlp_fcp_info &
1051 					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_fcp_info &
1053 					    NLP_FCP_2_DEVICE) &&
1054 					    (nlp->nlp_DID & mask) ==
1055 					    aff_d_id) {
1056 						action = 3;
1057 						break;
1058 					}
1059 
1060 					/*
1061 					 * Check for any other device that
1062 					 * matches our mask
1063 					 */
1064 					else if ((nlp->nlp_DID & mask) ==
1065 					    aff_d_id) {
1066 						if (linkdown) {
1067 							action = 1;
1068 							break;
1069 						} else { /* Must be an RSCN */
1070 
1071 							action = 2;
1072 							break;
1073 						}
1074 					}
1075 
1076 					nlp = nlp->nlp_list_next;
1077 				}
1078 
1079 				if (action) {
1080 					break;
1081 				}
1082 			}
1083 			rw_exit(&port->node_rwlock);
1084 
1085 			/* Check if nothing was found */
1086 			if (action == 0) {
1087 				break;
1088 			} else if (action == 1) {
1089 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1090 				    NULL, NULL, NULL);
1091 			} else if (action == 2) {
1092 #ifdef DHCHAP_SUPPORT
1093 				emlxs_dhc_auth_stop(port, nlp);
1094 #endif /* DHCHAP_SUPPORT */
1095 
1096 				/*
1097 				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
1099 				 */
1100 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1101 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1102 
1103 				/* Flush tx queue */
1104 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1105 
1106 				/* Flush chip queue */
1107 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1108 
1109 			} else if (action == 3) {	/* FCP2 devices */
1110 				unreg_vpi = 0;
1111 
1112 #ifdef DHCHAP_SUPPORT
1113 				emlxs_dhc_auth_stop(port, nlp);
1114 #endif /* DHCHAP_SUPPORT */
1115 
1116 				/*
1117 				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
1119 				 */
1120 				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
1121 				emlxs_node_close(port, nlp, FC_IP_RING,
1122 				    ((linkdown) ? 0 : 60));
1123 
1124 				/* Flush tx queues except for FCP ring */
1125 				(void) emlxs_tx_node_flush(port, nlp,
1126 				    &hba->ring[FC_CT_RING], 0, 0);
1127 				(void) emlxs_tx_node_flush(port, nlp,
1128 				    &hba->ring[FC_ELS_RING], 0, 0);
1129 				(void) emlxs_tx_node_flush(port, nlp,
1130 				    &hba->ring[FC_IP_RING], 0, 0);
1131 
1132 				/* Flush chip queues except for FCP ring */
1133 				(void) emlxs_chipq_node_flush(port,
1134 				    &hba->ring[FC_CT_RING], nlp, 0);
1135 				(void) emlxs_chipq_node_flush(port,
1136 				    &hba->ring[FC_ELS_RING], nlp, 0);
1137 				(void) emlxs_chipq_node_flush(port,
1138 				    &hba->ring[FC_IP_RING], nlp, 0);
1139 			}
1140 		}
1141 		break;
1142 
1143 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1144 
1145 		if (!linkdown && !vlinkdown) {
1146 			break;
1147 		}
1148 
1149 		for (;;) {
1150 			/*
1151 			 * We need to hold the locks this way because
1152 			 * emlxs_mb_unreg_did and the flush routines enter the
1153 			 * same locks. Also, when we release the lock the list
1154 			 * can change out from under us.
1155 			 */
1156 			rw_enter(&port->node_rwlock, RW_READER);
1157 			action = 0;
1158 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1159 				nlp = port->node_table[i];
1160 				while (nlp != NULL) {
1161 					if (!nlp->nlp_tag) {
1162 						nlp = nlp->nlp_list_next;
1163 						continue;
1164 					}
1165 					nlp->nlp_tag = 0;
1166 
1167 					/*
1168 					 * Check for FCP target device that
1169 					 * matches our mask
1170 					 */
					if ((nlp->nlp_fcp_info &
1172 					    NLP_FCP_TGT_DEVICE) &&
1173 					    (nlp->nlp_DID & mask) ==
1174 					    aff_d_id) {
1175 						action = 3;
1176 						break;
1177 					}
1178 
1179 					/*
1180 					 * Check for any other device that
1181 					 * matches our mask
1182 					 */
1183 					else if ((nlp->nlp_DID & mask) ==
1184 					    aff_d_id) {
1185 						if (linkdown) {
1186 							action = 1;
1187 							break;
1188 						} else { /* Must be an RSCN */
1189 
1190 							action = 2;
1191 							break;
1192 						}
1193 					}
1194 
1195 					nlp = nlp->nlp_list_next;
1196 				}
1197 				if (action) {
1198 					break;
1199 				}
1200 			}
1201 			rw_exit(&port->node_rwlock);
1202 
1203 			/* Check if nothing was found */
1204 			if (action == 0) {
1205 				break;
1206 			} else if (action == 1) {
1207 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1208 				    NULL, NULL, NULL);
1209 			} else if (action == 2) {
1210 				/*
1211 				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
1213 				 */
1214 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1215 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1216 
1217 				/* Flush tx queue */
1218 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1219 
1220 				/* Flush chip queue */
1221 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1222 
1223 			} else if (action == 3) {	/* FCP2 devices */
1224 				unreg_vpi = 0;
1225 
1226 				/*
1227 				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
1229 				 */
1230 				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
1231 				emlxs_node_close(port, nlp, FC_IP_RING,
1232 				    ((linkdown) ? 0 : 60));
1233 
1234 				/* Flush tx queues except for FCP ring */
1235 				(void) emlxs_tx_node_flush(port, nlp,
1236 				    &hba->ring[FC_CT_RING], 0, 0);
1237 				(void) emlxs_tx_node_flush(port, nlp,
1238 				    &hba->ring[FC_ELS_RING], 0, 0);
1239 				(void) emlxs_tx_node_flush(port, nlp,
1240 				    &hba->ring[FC_IP_RING], 0, 0);
1241 
1242 				/* Flush chip queues except for FCP ring */
1243 				(void) emlxs_chipq_node_flush(port,
1244 				    &hba->ring[FC_CT_RING], nlp, 0);
1245 				(void) emlxs_chipq_node_flush(port,
1246 				    &hba->ring[FC_ELS_RING], nlp, 0);
1247 				(void) emlxs_chipq_node_flush(port,
1248 				    &hba->ring[FC_IP_RING], nlp, 0);
1249 			}
1250 		}
1251 
1252 		break;
1253 
1254 	}	/* switch() */
1255 
1256 done:
1257 
1258 	if (unreg_vpi) {
1259 		(void) emlxs_mb_unreg_vpi(port);
1260 	}
1261 
1262 	return (0);
1263 
1264 }  /* emlxs_port_offline() */
1265 
1266 
1267 extern void
1268 emlxs_port_online(emlxs_port_t *vport)
1269 {
1270 	emlxs_hba_t *hba = vport->hba;
1271 	emlxs_port_t *port = &PPORT;
1272 	uint32_t state;
1273 	uint32_t update;
1274 	uint32_t npiv_linkup;
1275 	char topology[32];
1276 	char linkspeed[32];
1277 	char mode[32];
1278 
1279 	/*
1280 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1281 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1282 	 */
1283 
1284 	if ((vport->vpi > 0) &&
1285 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1286 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1287 		return;
1288 	}
1289 
1290 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1291 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1292 		return;
1293 	}
1294 
1295 	mutex_enter(&EMLXS_PORT_LOCK);
1296 
1297 	/* Check for mode */
1298 	if (port->tgt_mode) {
1299 		(void) strcpy(mode, ", target");
1300 	} else if (port->ini_mode) {
1301 		(void) strcpy(mode, ", initiator");
1302 	} else {
1303 		(void) strcpy(mode, "");
1304 	}
1305 
1306 	/* Check for loop topology */
1307 	if (hba->topology == TOPOLOGY_LOOP) {
1308 		state = FC_STATE_LOOP;
1309 		(void) strcpy(topology, ", loop");
1310 	} else {
1311 		state = FC_STATE_ONLINE;
1312 		(void) strcpy(topology, ", fabric");
1313 	}
1314 
1315 	/* Set the link speed */
1316 	switch (hba->linkspeed) {
1317 	case 0:
1318 		(void) strcpy(linkspeed, "Gb");
1319 		state |= FC_STATE_1GBIT_SPEED;
1320 		break;
1321 
1322 	case LA_1GHZ_LINK:
1323 		(void) strcpy(linkspeed, "1Gb");
1324 		state |= FC_STATE_1GBIT_SPEED;
1325 		break;
1326 	case LA_2GHZ_LINK:
1327 		(void) strcpy(linkspeed, "2Gb");
1328 		state |= FC_STATE_2GBIT_SPEED;
1329 		break;
1330 	case LA_4GHZ_LINK:
1331 		(void) strcpy(linkspeed, "4Gb");
1332 		state |= FC_STATE_4GBIT_SPEED;
1333 		break;
1334 	case LA_8GHZ_LINK:
1335 		(void) strcpy(linkspeed, "8Gb");
1336 		state |= FC_STATE_8GBIT_SPEED;
1337 		break;
1338 	case LA_10GHZ_LINK:
1339 		(void) strcpy(linkspeed, "10Gb");
1340 		state |= FC_STATE_10GBIT_SPEED;
1341 		break;
1342 	default:
1343 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1344 		break;
1345 	}
1346 
1347 	npiv_linkup = 0;
1348 	update = 0;
1349 
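	/*
	 * Only report a state change to the ULP when the link is really up
	 * (not loopback) and the reported state actually changed.
	 */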
1350 	if ((hba->state >= FC_LINK_UP) &&
1351 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1352 		update = 1;
1353 		vport->ulp_statec = state;
1354 
1355 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1356 			hba->flag |= FC_NPIV_LINKUP;
1357 			npiv_linkup = 1;
1358 		}
1359 	}
1360 
1361 	mutex_exit(&EMLXS_PORT_LOCK);
1362 
1363 	/*
1364 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1365 	 *    "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x
1366 	 *    statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
1367 	 *    hba->state, vport->ulp_statec);
1368 	 */
1369 	if (update) {
1370 		if (vport->flag & EMLXS_PORT_BOUND) {
1371 			if (vport->vpi == 0) {
1372 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1373 				    "%s%s%s", linkspeed, topology, mode);
1374 			} else if (npiv_linkup) {
1375 				EMLXS_MSGF(EMLXS_CONTEXT,
1376 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1377 				    linkspeed, topology, mode);
1378 			}
1379 
1380 			if (vport->tgt_mode) {
1381 #ifdef SFCT_SUPPORT
1382 				emlxs_fct_link_up(vport);
1383 #endif /* SFCT_SUPPORT */
1384 			} else if (vport->ini_mode) {
1385 				vport->ulp_statec_cb(vport->ulp_handle,
1386 				    state);
1387 			}
1388 		} else {
1389 			if (vport->vpi == 0) {
1390 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1391 				    "%s%s%s *", linkspeed, topology, mode);
1392 			} else if (npiv_linkup) {
1393 				EMLXS_MSGF(EMLXS_CONTEXT,
1394 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1395 				    linkspeed, topology, mode);
1396 			}
1397 		}
1398 
1399 		/* Check for waiting threads */
1400 		if (vport->vpi == 0) {
1401 			mutex_enter(&EMLXS_LINKUP_LOCK);
1402 			if (hba->linkup_wait_flag == TRUE) {
1403 				hba->linkup_wait_flag = FALSE;
1404 				cv_broadcast(&EMLXS_LINKUP_CV);
1405 			}
1406 			mutex_exit(&EMLXS_LINKUP_LOCK);
1407 		}
1408 
1409 		/* Flush any pending ub buffers */
1410 		emlxs_ub_flush(vport);
1411 	}
1412 
1413 	return;
1414 
1415 }  /* emlxs_port_online() */
1416 
1417 
1418 extern void
1419 emlxs_linkdown(emlxs_hba_t *hba)
1420 {
1421 	emlxs_port_t *port = &PPORT;
1422 	int i;
1423 
1424 	mutex_enter(&EMLXS_PORT_LOCK);
1425 
1426 	HBASTATS.LinkDown++;
1427 	emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);
1428 
1429 	/* Filter hba flags */
1430 	hba->flag &= FC_LINKDOWN_MASK;
1431 	hba->discovery_timer = 0;
1432 	hba->linkup_timer = 0;
1433 
1434 	mutex_exit(&EMLXS_PORT_LOCK);
1435 
1436 	for (i = 0; i < MAX_VPORTS; i++) {
1437 		port = &VPORT(i);
1438 
1439 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1440 			continue;
1441 		}
1442 
1443 		(void) emlxs_port_offline(port, 0xffffffff);
1444 
1445 	}
1446 
1447 	return;
1448 
1449 }  /* emlxs_linkdown() */
1450 
1451 
1452 extern void
1453 emlxs_linkup(emlxs_hba_t *hba)
1454 {
1455 	emlxs_port_t *port = &PPORT;
1456 	emlxs_config_t *cfg = &CFG;
1457 
1458 	mutex_enter(&EMLXS_PORT_LOCK);
1459 
1460 	HBASTATS.LinkUp++;
1461 	emlxs_ffstate_change_locked(hba, FC_LINK_UP);
1462 
1463 #ifdef MENLO_SUPPORT
1464 	if (hba->flag & FC_MENLO_MODE) {
1465 		mutex_exit(&EMLXS_PORT_LOCK);
1466 
1467 		/*
1468 		 * Trigger linkup CV and don't start linkup & discovery
1469 		 * timers
1470 		 */
1471 		mutex_enter(&EMLXS_LINKUP_LOCK);
1472 		cv_broadcast(&EMLXS_LINKUP_CV);
1473 		mutex_exit(&EMLXS_LINKUP_LOCK);
1474 
1475 		return;
1476 	}
1477 #endif /* MENLO_SUPPORT */
1478 
1479 	/* Set the linkup & discovery timers */
1480 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1481 	hba->discovery_timer =
1482 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1483 	    cfg[CFG_DISC_TIMEOUT].current;
1484 
1485 	mutex_exit(&EMLXS_PORT_LOCK);
1486 
1487 	return;
1488 
1489 }  /* emlxs_linkup() */
1490 
1491 
1492 /*
1493  *  emlxs_reset_link
1494  *
1495  *  Description:
1496  *  Called to reset the link with an init_link
1497  *
1498  *    Returns:
1499  *
1500  */
1501 extern int
1502 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
1503 {
1504 	emlxs_port_t *port = &PPORT;
1505 	emlxs_config_t *cfg;
1506 	MAILBOX *mb;
1507 
1508 	/*
1509 	 * Get a buffer to use for the mailbox command
1510 	 */
1511 	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
1512 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1513 		    "Unable to allocate mailbox buffer.");
1514 
1515 		return (1);
1516 	}
1517 
1518 	cfg = &CFG;
1519 
1520 	if (linkup) {
1521 		/*
1522 		 * Setup and issue mailbox INITIALIZE LINK command
1523 		 */
1524 
1525 		emlxs_mb_init_link(hba,
1526 		    (MAILBOX *) mb,
1527 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1528 
1529 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1530 
1531 		/* Clear the loopback mode */
1532 		mutex_enter(&EMLXS_PORT_LOCK);
1533 		hba->flag &= ~FC_LOOPBACK_MODE;
1534 		hba->loopback_tics = 0;
1535 		mutex_exit(&EMLXS_PORT_LOCK);
1536 
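		/*
		 * The command is issued MBX_NOWAIT; the mailbox buffer is
		 * only kept if the SLI layer queued it (MBX_BUSY), otherwise
		 * it is returned here.
		 */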
1537 		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
1538 		    0) != MBX_BUSY) {
1539 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1540 		}
1541 
1542 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1543 
1544 	} else {	/* hold link down */
1545 
1546 		emlxs_mb_down_link(hba, (MAILBOX *)mb);
1547 
1548 		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
1549 		    0) != MBX_BUSY) {
1550 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1551 		}
1552 
1553 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1554 		    "Disabling link...");
1555 	}
1556 
1557 	return (0);
1558 
1559 }  /* emlxs_reset_link() */
1560 
1561 
1562 extern int
1563 emlxs_online(emlxs_hba_t *hba)
1564 {
1565 	emlxs_port_t *port = &PPORT;
1566 	int32_t rval = 0;
1567 	uint32_t i = 0;
1568 
1569 	/* Make sure adapter is offline or exit trying (30 seconds) */
1570 	while (i++ < 30) {
1571 		/* Check if adapter is already going online */
1572 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1573 			return (0);
1574 		}
1575 
1576 		mutex_enter(&EMLXS_PORT_LOCK);
1577 
1578 		/* Check again */
1579 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1580 			mutex_exit(&EMLXS_PORT_LOCK);
1581 			return (0);
1582 		}
1583 
1584 		/* Check if adapter is offline */
1585 		if (hba->flag & FC_OFFLINE_MODE) {
1586 			/* Mark it going online */
1587 			hba->flag &= ~FC_OFFLINE_MODE;
1588 			hba->flag |= FC_ONLINING_MODE;
1589 
1590 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1591 			mutex_exit(&EMLXS_PORT_LOCK);
1592 			break;
1593 		}
1594 
1595 		mutex_exit(&EMLXS_PORT_LOCK);
1596 
1597 		DELAYMS(1000);
1598 	}
1599 
1600 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1601 	    "Going online...");
1602 
	if ((rval = emlxs_ffinit(hba))) {
1604 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1605 		    rval);
1606 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1607 
1608 		/* Set FC_OFFLINE_MODE */
1609 		mutex_enter(&EMLXS_PORT_LOCK);
1610 		emlxs_diag_state = DDI_OFFDI;
1611 		hba->flag |= FC_OFFLINE_MODE;
1612 		hba->flag &= ~FC_ONLINING_MODE;
1613 		mutex_exit(&EMLXS_PORT_LOCK);
1614 
1615 		return (rval);
1616 	}
1617 
1618 	/* Start the timer */
1619 	emlxs_timer_start(hba);
1620 
1621 	/* Set FC_ONLINE_MODE */
1622 	mutex_enter(&EMLXS_PORT_LOCK);
1623 	emlxs_diag_state = DDI_ONDI;
1624 	hba->flag |= FC_ONLINE_MODE;
1625 	hba->flag &= ~FC_ONLINING_MODE;
1626 	mutex_exit(&EMLXS_PORT_LOCK);
1627 
1628 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1629 
1630 #ifdef SFCT_SUPPORT
1631 	(void) emlxs_fct_port_initialize(port);
1632 #endif /* SFCT_SUPPORT */
1633 
1634 	return (rval);
1635 
1636 }  /* emlxs_online() */
1637 
1638 
1639 extern int
1640 emlxs_offline(emlxs_hba_t *hba)
1641 {
1642 	emlxs_port_t *port = &PPORT;
1643 	uint32_t i = 0;
1644 	int rval = 1;
1645 
1646 	/* Make sure adapter is online or exit trying (30 seconds) */
1647 	while (i++ < 30) {
1648 		/* Check if adapter is already going offline */
1649 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1650 			return (0);
1651 		}
1652 
1653 		mutex_enter(&EMLXS_PORT_LOCK);
1654 
1655 		/* Check again */
1656 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1657 			mutex_exit(&EMLXS_PORT_LOCK);
1658 			return (0);
1659 		}
1660 
1661 		/* Check if adapter is online */
1662 		if (hba->flag & FC_ONLINE_MODE) {
1663 			/* Mark it going offline */
1664 			hba->flag &= ~FC_ONLINE_MODE;
1665 			hba->flag |= FC_OFFLINING_MODE;
1666 
1667 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1668 			mutex_exit(&EMLXS_PORT_LOCK);
1669 			break;
1670 		}
1671 
1672 		mutex_exit(&EMLXS_PORT_LOCK);
1673 
1674 		DELAYMS(1000);
1675 	}
1676 
1677 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1678 	    "Going offline...");
1679 
1680 	if (port->ini_mode) {
1681 		/* Flush all IO */
1682 		emlxs_linkdown(hba);
1683 
1684 	}
1685 #ifdef SFCT_SUPPORT
1686 	else {
1687 		(void) emlxs_fct_port_shutdown(port);
1688 	}
1689 #endif /* SFCT_SUPPORT */
1690 
1691 	/* Check if adapter was shutdown */
1692 	if (hba->flag & FC_HARDWARE_ERROR) {
1693 		/*
1694 		 * Force mailbox cleanup
1695 		 * This will wake any sleeping or polling threads
1696 		 */
1697 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1698 	}
1699 
1700 	/* Pause here for the IO to settle */
1701 	delay(drv_usectohz(1000000));	/* 1 sec */
1702 
1703 	/* Unregister all nodes */
1704 	emlxs_ffcleanup(hba);
1705 
1706 
1707 	if (hba->bus_type == SBUS_FC) {
1708 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba, hba->sbus_csr_addr),
1709 		    0x9A);
1710 #ifdef FMA_SUPPORT
1711 		if (emlxs_fm_check_acc_handle(hba, hba->sbus_csr_handle)
1712 		    != DDI_FM_OK) {
1713 			EMLXS_MSGF(EMLXS_CONTEXT,
1714 			    &emlxs_invalid_access_handle_msg, NULL);
1715 		}
1716 #endif  /* FMA_SUPPORT */
1717 	}
1718 
1719 	/* Stop the timer */
1720 	emlxs_timer_stop(hba);
1721 
1722 	/* For safety flush every iotag list */
1723 	if (emlxs_iotag_flush(hba)) {
1724 		/* Pause here for the IO to flush */
1725 		delay(drv_usectohz(1000));
1726 	}
1727 
1728 	/* Wait for poll command request to settle */
1729 	while (hba->io_poll_count > 0) {
1730 		delay(drv_usectohz(2000000));   /* 2 sec */
1731 	}
1732 
1733 	emlxs_sli_offline(hba);
1734 
1735 	/* Free all the shared memory */
1736 	(void) emlxs_mem_free_buffer(hba);
1737 
1738 	mutex_enter(&EMLXS_PORT_LOCK);
1739 	hba->flag |= FC_OFFLINE_MODE;
1740 	hba->flag &= ~FC_OFFLINING_MODE;
1741 	emlxs_diag_state = DDI_OFFDI;
1742 	mutex_exit(&EMLXS_PORT_LOCK);
1743 
1744 	rval = 0;
1745 
1746 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1747 
1748 done:
1749 
1750 	return (rval);
1751 
1752 }  /* emlxs_offline() */
1753 
1754 
1755 
1756 extern int
1757 emlxs_power_down(emlxs_hba_t *hba)
1758 {
1759 #ifdef FMA_SUPPORT
1760 	emlxs_port_t *port = &PPORT;
1761 #endif  /* FMA_SUPPORT */
1762 	int32_t rval = 0;
1763 	uint32_t *ptr;
1764 	uint32_t i;
1765 
1766 	if ((rval = emlxs_offline(hba))) {
1767 		return (rval);
1768 	}
1769 
1770 	/* Save pci config space */
1771 	ptr = (uint32_t *)hba->pm_config;
1772 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1773 		*ptr =
1774 		    ddi_get32(hba->pci_acc_handle,
1775 		    (uint32_t *)(hba->pci_addr + i));
1776 	}
1777 
1778 	/* Put chip in D3 state */
1779 	(void) ddi_put8(hba->pci_acc_handle,
1780 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1781 	    (uint8_t)PCI_PM_D3_STATE);
1782 
1783 #ifdef FMA_SUPPORT
1784 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1785 	    != DDI_FM_OK) {
1786 		EMLXS_MSGF(EMLXS_CONTEXT,
1787 		    &emlxs_invalid_access_handle_msg, NULL);
1788 		return (1);
1789 	}
1790 #endif  /* FMA_SUPPORT */
1791 
1792 	return (0);
1793 
1794 }  /* End emlxs_power_down */
1795 
1796 
1797 extern int
1798 emlxs_power_up(emlxs_hba_t *hba)
1799 {
1800 #ifdef FMA_SUPPORT
1801 	emlxs_port_t *port = &PPORT;
1802 #endif  /* FMA_SUPPORT */
1803 	int32_t rval = 0;
1804 	uint32_t *ptr;
1805 	uint32_t i;
1806 
1807 
1808 	/* Take chip out of D3 state */
1809 	(void) ddi_put8(hba->pci_acc_handle,
1810 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1811 	    (uint8_t)PCI_PM_D0_STATE);
1812 
1813 	/* Must have at least 10 ms delay here */
1814 	DELAYMS(100);
1815 
1816 	/* Restore pci config space */
1817 	ptr = (uint32_t *)hba->pm_config;
1818 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1819 		(void) ddi_put32(hba->pci_acc_handle,
1820 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1821 	}
1822 
1823 #ifdef FMA_SUPPORT
1824 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1825 	    != DDI_FM_OK) {
1826 		EMLXS_MSGF(EMLXS_CONTEXT,
1827 		    &emlxs_invalid_access_handle_msg, NULL);
1828 		return (1);
1829 	}
1830 #endif  /* FMA_SUPPORT */
1831 
1832 	/* Bring adapter online */
1833 	if ((rval = emlxs_online(hba))) {
1834 		(void) ddi_put8(hba->pci_acc_handle,
1835 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1836 		    (uint8_t)PCI_PM_D3_STATE);
1837 
1838 		return (rval);
1839 	}
1840 
1841 	return (rval);
1842 
1843 }  /* End emlxs_power_up */
1844 
1845 
1846 /*
1847  *
1848  * NAME:     emlxs_ffcleanup
1849  *
1850  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1851  *
1852  * EXECUTION ENVIRONMENT: process only
1853  *
1854  * CALLED FROM: CFG_TERM
1855  *
1856  * INPUT: hba       - pointer to the dev_ctl area.
1857  *
1858  * RETURNS: none
1859  */
1860 extern void
1861 emlxs_ffcleanup(emlxs_hba_t *hba)
1862 {
1863 	emlxs_port_t *port = &PPORT;
1864 	uint32_t i;
1865 
1866 	/* Disable all but the mailbox interrupt */
1867 	emlxs_disable_intr(hba, HC_MBINT_ENA);
1868 
1869 	/* Make sure all port nodes are destroyed */
1870 	for (i = 0; i < MAX_VPORTS; i++) {
1871 		port = &VPORT(i);
1872 
1873 		if (port->node_count) {
1874 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1875 		}
1876 	}
1877 
1878 	/* Clear all interrupt enable conditions */
1879 	emlxs_disable_intr(hba, 0);
1880 
1881 	return;
1882 
1883 }  /* emlxs_ffcleanup() */
1884 
1885 
1886 extern uint16_t
1887 emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp)
1888 {
1889 	emlxs_hba_t *hba;
1890 	emlxs_port_t *port;
1891 	uint16_t iotag;
1892 	uint32_t i;
1893 
1894 	hba = rp->hba;
1895 
1896 	mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno));
1897 
1898 	if (sbp->iotag != 0) {
1899 		port = &PPORT;
1900 
1901 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Pkt already registered! ring=%p iotag=%d sbp=%p",
1903 		    sbp->ring, sbp->iotag, sbp);
1904 	}
1905 
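	/*
	 * Scan the ring's fc_table for a free (or stale) slot, starting at
	 * fc_iotag and wrapping before max_iotag. Slot 0 is never used, so
	 * a return value of 0 means no iotag could be allocated.
	 */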
1906 	iotag = 0;
1907 	for (i = 0; i < rp->max_iotag; i++) {
1908 		if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) {
1909 			rp->fc_iotag = 1;
1910 		}
1911 		iotag = rp->fc_iotag++;
1912 
1913 		if (rp->fc_table[iotag] == 0 ||
1914 		    rp->fc_table[iotag] == STALE_PACKET) {
1915 			hba->io_count[rp->ringno]++;
1916 			rp->fc_table[iotag] = sbp;
1917 
1918 			sbp->iotag = iotag;
1919 			sbp->ring = rp;
1920 
1921 			break;
1922 		}
1923 		iotag = 0;
1924 	}
1925 
1926 	mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
1927 
1928 	/*
1929 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 *    "emlxs_register_pkt: ringno=%d iotag=%d sbp=%p",
1931 	 *    rp->ringno, iotag, sbp);
1932 	 */
1933 
1934 	return (iotag);
1935 
1936 }  /* emlxs_register_pkt() */
1937 
1938 
1939 
1940 extern emlxs_buf_t *
1941 emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced)
1942 {
1943 	emlxs_hba_t *hba;
1944 	emlxs_buf_t *sbp;
1945 	uint32_t ringno;
1946 
1947 	/* Check the iotag range */
1948 	if ((iotag == 0) || (iotag >= rp->max_iotag)) {
1949 		return (NULL);
1950 	}
1951 
1952 	sbp = NULL;
1953 	hba = rp->hba;
1954 	ringno = rp->ringno;
1955 
1956 	/* Remove the sbp from the table */
1957 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
1958 	sbp = rp->fc_table[iotag];
1959 
1960 	if (!sbp || (sbp == STALE_PACKET)) {
1961 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1962 		return (sbp);
1963 	}
1964 
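	/*
	 * A forced unregister leaves STALE_PACKET in the slot instead of
	 * NULL so that a late lookup of this iotag sees the stale marker
	 * rather than a valid sbp.
	 */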
1965 	rp->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1966 	hba->io_count[ringno]--;
1967 	sbp->iotag = 0;
1968 
1969 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1970 
1971 
1972 	/* Clean up the sbp */
1973 	mutex_enter(&sbp->mtx);
1974 
1975 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
1976 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
1977 		hba->ring_tx_count[ringno]--;
1978 	}
1979 
1980 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
1981 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
1982 	}
1983 
1984 	if (sbp->bmp) {
1985 		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
1986 		sbp->bmp = 0;
1987 	}
1988 
1989 	mutex_exit(&sbp->mtx);
1990 
1991 	return (sbp);
1992 
1993 }  /* emlxs_unregister_pkt() */
1994 
1995 
1996 
1997 /* Flush all IO's to all nodes for a given ring */
1998 extern uint32_t
1999 emlxs_tx_ring_flush(emlxs_hba_t *hba, RING *rp, emlxs_buf_t *fpkt)
2000 {
2001 	emlxs_port_t *port = &PPORT;
2002 	emlxs_buf_t *sbp;
2003 	IOCBQ *iocbq;
2004 	IOCBQ *next;
2005 	IOCB *iocb;
2006 	uint32_t ringno;
2007 	Q abort;
2008 	NODELIST *ndlp;
2009 	IOCB *icmd;
2010 	MATCHMAP *mp;
2011 	uint32_t i;
2012 
2013 	ringno = rp->ringno;
2014 	bzero((void *)&abort, sizeof (Q));
2015 
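	/*
	 * While holding EMLXS_RINGTX_LOCK, move every queued IOCB for every
	 * node on this ring onto a local abort queue and clear the per-node
	 * tx queues; the IOCBs are then completed or freed after the lock
	 * is dropped.
	 */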
2016 	mutex_enter(&EMLXS_RINGTX_LOCK);
2017 
2018 	/* While a node needs servicing */
2019 	while (rp->nodeq.q_first) {
2020 		ndlp = (NODELIST *) rp->nodeq.q_first;
2021 
2022 		/* Check if priority queue is not empty */
2023 		if (ndlp->nlp_ptx[ringno].q_first) {
2024 			/* Transfer all iocb's to local queue */
2025 			if (abort.q_first == 0) {
2026 				abort.q_first = ndlp->nlp_ptx[ringno].q_first;
2027 			} else {
2028 				((IOCBQ *)abort.q_last)->next =
2029 				    (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
2030 			}
2031 
2032 			abort.q_last = ndlp->nlp_ptx[ringno].q_last;
2033 			abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2034 		}
2035 
2036 		/* Check if tx queue is not empty */
2037 		if (ndlp->nlp_tx[ringno].q_first) {
2038 			/* Transfer all iocb's to local queue */
2039 			if (abort.q_first == 0) {
2040 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2041 			} else {
2042 				((IOCBQ *)abort.q_last)->next =
2043 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2044 			}
2045 
2046 			abort.q_last = ndlp->nlp_tx[ringno].q_last;
2047 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2048 
2049 		}
2050 
2051 		/* Clear the queue pointers */
2052 		ndlp->nlp_ptx[ringno].q_first = NULL;
2053 		ndlp->nlp_ptx[ringno].q_last = NULL;
2054 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2055 
2056 		ndlp->nlp_tx[ringno].q_first = NULL;
2057 		ndlp->nlp_tx[ringno].q_last = NULL;
2058 		ndlp->nlp_tx[ringno].q_cnt = 0;
2059 
2060 		/* Remove node from service queue */
2061 
2062 		/* If this is the last node on list */
2063 		if (rp->nodeq.q_last == (void *)ndlp) {
2064 			rp->nodeq.q_last = NULL;
2065 			rp->nodeq.q_first = NULL;
2066 			rp->nodeq.q_cnt = 0;
2067 		} else {
2068 			/* Remove node from head */
2069 			rp->nodeq.q_first = ndlp->nlp_next[ringno];
2070 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
2071 			    rp->nodeq.q_first;
2072 			rp->nodeq.q_cnt--;
2073 		}
2074 
2075 		/* Clear node */
2076 		ndlp->nlp_next[ringno] = NULL;
2077 	}
2078 
2079 	/* First cleanup the iocb's while still holding the lock */
2080 	iocbq = (IOCBQ *) abort.q_first;
2081 	while (iocbq) {
2082 		/* Free the IoTag and the bmp */
2083 		iocb = &iocbq->iocb;
2084 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2085 
2086 		if (sbp && (sbp != STALE_PACKET)) {
2087 			mutex_enter(&sbp->mtx);
2088 
2089 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2090 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2091 				hba->ring_tx_count[ringno]--;
2092 			}
2093 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2094 
2095 			/*
2096 			 * If the fpkt is already set, then we will leave it
2097 			 * alone. This ensures that this pkt is only accounted
2098 			 * for on one fpkt->flush_count
2099 			 */
2100 			if (!sbp->fpkt && fpkt) {
2101 				mutex_enter(&fpkt->mtx);
2102 				sbp->fpkt = fpkt;
2103 				fpkt->flush_count++;
2104 				mutex_exit(&fpkt->mtx);
2105 			}
2106 
2107 			mutex_exit(&sbp->mtx);
2108 		}
2109 
2110 		iocbq = (IOCBQ *)iocbq->next;
2111 
2112 	}	/* end of while */
2113 
2114 	mutex_exit(&EMLXS_RINGTX_LOCK);
2115 
2116 	/* Now abort the iocb's */
2117 	iocbq = (IOCBQ *)abort.q_first;
2118 	while (iocbq) {
2119 		/* Save the next iocbq for now */
2120 		next = (IOCBQ *)iocbq->next;
2121 
2122 		/* Unlink this iocbq */
2123 		iocbq->next = NULL;
2124 
2125 		/* Get the pkt */
2126 		sbp = (emlxs_buf_t *)iocbq->sbp;
2127 
2128 		if (sbp) {
2129 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2130 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2131 
2132 			if (hba->state >= FC_LINK_UP) {
2133 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2134 				    IOERR_ABORT_REQUESTED, 1);
2135 			} else {
2136 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2137 				    IOERR_LINK_DOWN, 1);
2138 			}
2139 
2140 		}
2141 		/* Free the iocb and its associated buffers */
2142 		else {
2143 			icmd = &iocbq->iocb;
2144 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2145 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2146 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2147 				if ((hba->flag &
2148 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2149 					/* HBA is detaching or offlining */
2150 					if (icmd->ulpCommand !=
2151 					    CMD_QUE_RING_LIST64_CN) {
2152 						uint8_t	*tmp;
2153 
2154 						for (i = 0;
2155 						    i < icmd->ulpBdeCount;
2156 						    i++) {
2157 							mp = EMLXS_GET_VADDR(
2158 							    hba, rp, icmd);
2159 
2160 							tmp = (uint8_t *)mp;
2161 							if (mp) {
2162 							(void) emlxs_mem_put(
2163 							    hba, MEM_BUF, tmp);
2164 							}
2165 						}
2166 					}
2167 
2168 					(void) emlxs_mem_put(hba, MEM_IOCB,
2169 					    (uint8_t *)iocbq);
2170 				} else {
2171 					/* repost the unsolicited buffer */
2172 					emlxs_sli_issue_iocb_cmd(hba, rp,
2173 					    iocbq);
2174 				}
2175 			}
2176 		}
2177 
2178 		iocbq = next;
2179 
2180 	}	/* end of while */
2181 
2182 	return (abort.q_cnt);
2183 
2184 }  /* emlxs_tx_ring_flush() */
2185 
2186 
2187 /* Flush all IO's on all or a given ring for a given node */
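/*
 * emlxs_tx_node_flush
 *
 * Same cleanup as emlxs_tx_ring_flush(), but limited to one node.  If
 * 'ring' is NULL every ring is scanned, otherwise only the given ring.
 * When 'shutdown' is set (and this is not the base node) the node is
 * also marked inactive so nothing new can be queued to it; the base
 * node's priority queue is only flushed when 'shutdown' is set.  The
 * node is unlinked from each ring's circular service queue if present.
 * Returns the number of IOCBs flushed.
 */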
2188 extern uint32_t
2189 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, RING *ring,
2190     uint32_t shutdown, emlxs_buf_t *fpkt)
2191 {
2192 	emlxs_hba_t *hba = HBA;
2193 	emlxs_buf_t *sbp;
2194 	uint32_t ringno;
2195 	RING *rp;
2196 	IOCB *icmd;
2197 	IOCBQ *iocbq;
2198 	NODELIST *prev;
2199 	IOCBQ *next;
2200 	IOCB *iocb;
2201 	Q abort;
2202 	uint32_t i;
2203 	MATCHMAP *mp;
2204 
2205 
2206 	bzero((void *)&abort, sizeof (Q));
2207 
2208 	/* Flush all I/O's on tx queue to this target */
2209 	mutex_enter(&EMLXS_RINGTX_LOCK);
2210 
2211 	if (!ndlp->nlp_base && shutdown) {
2212 		ndlp->nlp_active = 0;
2213 	}
2214 
2215 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2216 		rp = &hba->ring[ringno];
2217 
2218 		if (ring && rp != ring) {
2219 			continue;
2220 		}
2221 
2222 		if (!ndlp->nlp_base || shutdown) {
2223 			/* Check if priority queue is not empty */
2224 			if (ndlp->nlp_ptx[ringno].q_first) {
2225 				/* Transfer all iocb's to local queue */
2226 				if (abort.q_first == 0) {
2227 					abort.q_first =
2228 					    ndlp->nlp_ptx[ringno].q_first;
2229 				} else {
2230 					((IOCBQ *)abort.q_last)->next =
2231 					    (IOCBQ *)ndlp->nlp_ptx[ringno].
2232 					    q_first;
2233 				}
2234 
2235 				abort.q_last = ndlp->nlp_ptx[ringno].q_last;
2236 				abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2237 			}
2238 		}
2239 
2240 		/* Check if tx queue is not empty */
2241 		if (ndlp->nlp_tx[ringno].q_first) {
2242 			/* Transfer all iocb's to local queue */
2243 			if (abort.q_first == 0) {
2244 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2245 			} else {
2246 				((IOCBQ *)abort.q_last)->next =
2247 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2248 			}
2249 
2250 			abort.q_last = ndlp->nlp_tx[ringno].q_last;
2251 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2252 		}
2253 
2254 		/* Clear the queue pointers */
2255 		ndlp->nlp_ptx[ringno].q_first = NULL;
2256 		ndlp->nlp_ptx[ringno].q_last = NULL;
2257 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2258 
2259 		ndlp->nlp_tx[ringno].q_first = NULL;
2260 		ndlp->nlp_tx[ringno].q_last = NULL;
2261 		ndlp->nlp_tx[ringno].q_cnt = 0;
2262 
2263 		/* If this node was on the ring queue, remove it */
2264 		if (ndlp->nlp_next[ringno]) {
2265 			/* If this is the only node on list */
2266 			if (rp->nodeq.q_first == (void *)ndlp &&
2267 			    rp->nodeq.q_last == (void *)ndlp) {
2268 				rp->nodeq.q_last = NULL;
2269 				rp->nodeq.q_first = NULL;
2270 				rp->nodeq.q_cnt = 0;
2271 			} else if (rp->nodeq.q_first == (void *)ndlp) {
2272 				rp->nodeq.q_first = ndlp->nlp_next[ringno];
2273 				((NODELIST *) rp->nodeq.q_last)->
2274 				    nlp_next[ringno] = rp->nodeq.q_first;
2275 				rp->nodeq.q_cnt--;
2276 			} else {
2277 				/*
2278 				 * This is a little more difficult; find the
2279 				 * previous node in the circular ring queue.
2280 				 */
2281 				prev = ndlp;
2282 				while (prev->nlp_next[ringno] != ndlp) {
2283 					prev = prev->nlp_next[ringno];
2284 				}
2285 
2286 				prev->nlp_next[ringno] =
2287 				    ndlp->nlp_next[ringno];
2288 
2289 				if (rp->nodeq.q_last == (void *)ndlp) {
2290 					rp->nodeq.q_last = (void *)prev;
2291 				}
2292 				rp->nodeq.q_cnt--;
2293 
2294 			}
2295 
2296 			/* Clear node */
2297 			ndlp->nlp_next[ringno] = NULL;
2298 		}
2299 
2300 	}
2301 
2302 	/* First cleanup the iocb's while still holding the lock */
2303 	iocbq = (IOCBQ *) abort.q_first;
2304 	while (iocbq) {
2305 		/* Free the IoTag and the bmp */
2306 		iocb = &iocbq->iocb;
2307 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2308 
2309 		if (sbp && (sbp != STALE_PACKET)) {
2310 			mutex_enter(&sbp->mtx);
2311 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2312 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2313 				hba->ring_tx_count[((RING *)iocbq->ring)->ringno]--;
2314 			}
2315 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2316 
2317 			/*
2318 			 * If the fpkt is already set, then we will leave it
2319 			 * alone. This ensures that this pkt is only accounted
2320 			 * for on one fpkt->flush_count
2321 			 */
2322 			if (!sbp->fpkt && fpkt) {
2323 				mutex_enter(&fpkt->mtx);
2324 				sbp->fpkt = fpkt;
2325 				fpkt->flush_count++;
2326 				mutex_exit(&fpkt->mtx);
2327 			}
2328 
2329 			mutex_exit(&sbp->mtx);
2330 		}
2331 
2332 		iocbq = (IOCBQ *) iocbq->next;
2333 
2334 	}	/* end of while */
2335 
2336 	mutex_exit(&EMLXS_RINGTX_LOCK);
2337 
2338 	/* Now abort the iocb's outside the locks */
2339 	iocbq = (IOCBQ *)abort.q_first;
2340 	while (iocbq) {
2341 		/* Save the next iocbq for now */
2342 		next = (IOCBQ *)iocbq->next;
2343 
2344 		/* Unlink this iocbq */
2345 		iocbq->next = NULL;
2346 
2347 		/* Get the pkt */
2348 		sbp = (emlxs_buf_t *)iocbq->sbp;
2349 
2350 		if (sbp) {
2351 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2352 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2353 
2354 			if (hba->state >= FC_LINK_UP) {
2355 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2356 				    IOERR_ABORT_REQUESTED, 1);
2357 			} else {
2358 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2359 				    IOERR_LINK_DOWN, 1);
2360 			}
2361 
2362 		}
2363 		/* Free the iocb and its associated buffers */
2364 		else {
2365 			icmd = &iocbq->iocb;
2366 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2367 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2368 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2369 				if ((hba->flag &
2370 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2371 					/* HBA is detaching or offlining */
2372 					if (icmd->ulpCommand !=
2373 					    CMD_QUE_RING_LIST64_CN) {
2374 						uint8_t	*tmp;
2375 
2376 						for (i = 0;
2377 						    i < icmd->ulpBdeCount;
2378 						    i++) {
2379 							mp = EMLXS_GET_VADDR(
2380 							    hba, rp, icmd);
2381 
2382 							tmp = (uint8_t *)mp;
2383 							if (mp) {
2384 							(void) emlxs_mem_put(
2385 							    hba, MEM_BUF, tmp);
2386 							}
2387 						}
2388 					}
2389 
2390 					(void) emlxs_mem_put(hba, MEM_IOCB,
2391 					    (uint8_t *)iocbq);
2392 				} else {
2393 					/* repost the unsolicited buffer */
2394 					emlxs_sli_issue_iocb_cmd(hba, rp,
2395 					    iocbq);
2396 				}
2397 			}
2398 		}
2399 
2400 		iocbq = next;
2401 
2402 	}	/* end of while */
2403 
2404 	return (abort.q_cnt);
2405 
2406 }  /* emlxs_tx_node_flush() */
2407 
2408 
2409 /* Check for IO's on all or a given ring for a given node */
2410 extern uint32_t
2411 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, RING *ring)
2412 {
2413 	emlxs_hba_t *hba = HBA;
2414 	uint32_t ringno;
2415 	RING *rp;
2416 	uint32_t count;
2417 
2418 	count = 0;
2419 
2420 	/* Count I/O's on the tx queues for this target */
2421 	mutex_enter(&EMLXS_RINGTX_LOCK);
2422 
2423 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2424 		rp = &hba->ring[ringno];
2425 
2426 		if (ring && rp != ring) {
2427 			continue;
2428 		}
2429 
2430 		/* Check if priority queue is not empty */
2431 		if (ndlp->nlp_ptx[ringno].q_first) {
2432 			count += ndlp->nlp_ptx[ringno].q_cnt;
2433 		}
2434 
2435 		/* Check if tx queue is not empty */
2436 		if (ndlp->nlp_tx[ringno].q_first) {
2437 			count += ndlp->nlp_tx[ringno].q_cnt;
2438 		}
2439 
2440 	}
2441 
2442 	mutex_exit(&EMLXS_RINGTX_LOCK);
2443 
2444 	return (count);
2445 
2446 }  /* emlxs_tx_node_check() */
2447 
2448 
2449 
2450 /* Flush all IO's on the FCP ring for a given node's lun */
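/*
 * emlxs_tx_lun_flush
 *
 * Walk the node's FCP ring priority and normal transmit queues and
 * remove only the IOCBs whose packet is addressed to the given lun,
 * collecting them on a local abort queue so the RINGTX lock is not
 * held while they are completed.  Completion status matches the other
 * flush routines (ABORT_REQUESTED when the link is up, LINK_DOWN
 * otherwise).  Returns the number of IOCBs flushed.
 */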
2451 extern uint32_t
2452 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2453     emlxs_buf_t *fpkt)
2454 {
2455 	emlxs_hba_t *hba = HBA;
2456 	emlxs_buf_t *sbp;
2457 	uint32_t ringno;
2458 	IOCBQ *iocbq;
2459 	IOCBQ *prev;
2460 	IOCBQ *next;
2461 	IOCB *iocb;
2462 	IOCB *icmd;
2463 	Q abort;
2464 	uint32_t i;
2465 	MATCHMAP *mp;
2466 	RING *rp;
2467 
2468 	ringno = FC_FCP_RING;
2469 	rp = &hba->ring[ringno];
2470 
2471 	bzero((void *)&abort, sizeof (Q));
2472 
2473 	/* Flush I/O's on txQ to this target's lun */
2474 	mutex_enter(&EMLXS_RINGTX_LOCK);
2475 
2476 	/* Scan the priority queue first */
2477 	prev = NULL;
2478 	iocbq = (IOCBQ *) ndlp->nlp_ptx[ringno].q_first;
2479 
2480 	while (iocbq) {
2481 		next = (IOCBQ *)iocbq->next;
2482 		iocb = &iocbq->iocb;
2483 		sbp = (emlxs_buf_t *)iocbq->sbp;
2484 
2485 		/* Check if this IO is for our lun */
2486 		if (sbp->lun == lun) {
2487 			/* Remove iocb from the node's tx queue */
2488 			if (next == 0) {
2489 				ndlp->nlp_ptx[ringno].q_last =
2490 				    (uint8_t *)prev;
2491 			}
2492 
2493 			if (prev == 0) {
2494 				ndlp->nlp_ptx[ringno].q_first =
2495 				    (uint8_t *)next;
2496 			} else {
2497 				prev->next = next;
2498 			}
2499 
2500 			iocbq->next = NULL;
2501 			ndlp->nlp_ptx[ringno].q_cnt--;
2502 
2503 			/*
2504 			 * Add this iocb to our local abort Q
2505 			 * This way we don't hold the RINGTX lock too long
2506 			 */
2507 			if (abort.q_first) {
2508 				((IOCBQ *)abort.q_last)->next = iocbq;
2509 				abort.q_last = (uint8_t *)iocbq;
2510 				abort.q_cnt++;
2511 			} else {
2512 				abort.q_first = (uint8_t *)iocbq;
2513 				abort.q_last = (uint8_t *)iocbq;
2514 				abort.q_cnt = 1;
2515 			}
2516 			iocbq->next = NULL;
2517 		} else {
2518 			prev = iocbq;
2519 		}
2520 
2521 		iocbq = next;
2522 
2523 	}	/* while (iocbq) */
2524 
2525 
2526 	/* Scan the regular queue */
2527 	prev = NULL;
2528 	iocbq = (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2529 
2530 	while (iocbq) {
2531 		next = (IOCBQ *)iocbq->next;
2532 		iocb = &iocbq->iocb;
2533 		sbp = (emlxs_buf_t *)iocbq->sbp;
2534 
2535 		/* Check if this IO is for our lun */
2536 		if (sbp->lun == lun) {
2537 			/* Remove iocb from the node's tx queue */
2538 			if (next == 0) {
2539 				ndlp->nlp_tx[ringno].q_last =
2540 				    (uint8_t *)prev;
2541 			}
2542 
2543 			if (prev == 0) {
2544 				ndlp->nlp_tx[ringno].q_first =
2545 				    (uint8_t *)next;
2546 			} else {
2547 				prev->next = next;
2548 			}
2549 
2550 			iocbq->next = NULL;
2551 			ndlp->nlp_tx[ringno].q_cnt--;
2552 
2553 			/*
2554 			 * Add this iocb to our local abort Q
2555 			 * This way we don't hold the RINGTX lock too long
2556 			 */
2557 			if (abort.q_first) {
2558 				((IOCBQ *) abort.q_last)->next = iocbq;
2559 				abort.q_last = (uint8_t *)iocbq;
2560 				abort.q_cnt++;
2561 			} else {
2562 				abort.q_first = (uint8_t *)iocbq;
2563 				abort.q_last = (uint8_t *)iocbq;
2564 				abort.q_cnt = 1;
2565 			}
2566 			iocbq->next = NULL;
2567 		} else {
2568 			prev = iocbq;
2569 		}
2570 
2571 		iocbq = next;
2572 
2573 	}	/* while (iocbq) */
2574 
2575 	/* First cleanup the iocb's while still holding the lock */
2576 	iocbq = (IOCBQ *)abort.q_first;
2577 	while (iocbq) {
2578 		/* Free the IoTag and the bmp */
2579 		iocb = &iocbq->iocb;
2580 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2581 
2582 		if (sbp && (sbp != STALE_PACKET)) {
2583 			mutex_enter(&sbp->mtx);
2584 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2585 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2586 				hba->ring_tx_count[ringno]--;
2587 			}
2588 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2589 
2590 			/*
2591 			 * If the fpkt is already set, then we will leave it
2592 			 * alone. This ensures that this pkt is only accounted
2593 			 * for on one fpkt->flush_count
2594 			 */
2595 			if (!sbp->fpkt && fpkt) {
2596 				mutex_enter(&fpkt->mtx);
2597 				sbp->fpkt = fpkt;
2598 				fpkt->flush_count++;
2599 				mutex_exit(&fpkt->mtx);
2600 			}
2601 
2602 			mutex_exit(&sbp->mtx);
2603 		}
2604 
2605 		iocbq = (IOCBQ *) iocbq->next;
2606 
2607 	}	/* end of while */
2608 
2609 	mutex_exit(&EMLXS_RINGTX_LOCK);
2610 
2611 	/* Now abort the iocb's outside the locks */
2612 	iocbq = (IOCBQ *)abort.q_first;
2613 	while (iocbq) {
2614 		/* Save the next iocbq for now */
2615 		next = (IOCBQ *)iocbq->next;
2616 
2617 		/* Unlink this iocbq */
2618 		iocbq->next = NULL;
2619 
2620 		/* Get the pkt */
2621 		sbp = (emlxs_buf_t *)iocbq->sbp;
2622 
2623 		if (sbp) {
2624 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2625 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2626 
2627 			if (hba->state >= FC_LINK_UP) {
2628 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2629 				    IOERR_ABORT_REQUESTED, 1);
2630 			} else {
2631 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2632 				    IOERR_LINK_DOWN, 1);
2633 			}
2634 		}
2635 
2636 		/* Free the iocb and its associated buffers */
2637 		else {
2638 			icmd = &iocbq->iocb;
2639 
2640 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2641 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2642 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2643 				if ((hba->flag &
2644 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2645 					/* HBA is detaching or offlining */
2646 					if (icmd->ulpCommand !=
2647 					    CMD_QUE_RING_LIST64_CN) {
2648 						uint8_t	*tmp;
2649 
2650 						for (i = 0;
2651 						    i < icmd->ulpBdeCount;
2652 						    i++) {
2653 							mp = EMLXS_GET_VADDR(
2654 							    hba, rp, icmd);
2655 
2656 							tmp = (uint8_t *)mp;
2657 							if (mp) {
2658 							(void) emlxs_mem_put(
2659 							    hba, MEM_BUF, tmp);
2660 							}
2661 						}
2662 					}
2663 
2664 					(void) emlxs_mem_put(hba, MEM_IOCB,
2665 					    (uint8_t *)iocbq);
2666 				} else {
2667 					/* repost the unsolicited buffer */
2668 					emlxs_sli_issue_iocb_cmd(hba, rp,
2669 					    iocbq);
2670 				}
2671 			}
2672 		}
2673 
2674 		iocbq = next;
2675 
2676 	}	/* end of while */
2677 
2678 
2679 	return (abort.q_cnt);
2680 
2681 }  /* emlxs_tx_lun_flush() */
2682 
2683 
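/*
 * emlxs_tx_put
 *
 * Queue an IOCB for transmit.  The IOCB is appended to the owning
 * node's priority queue (IOCB_PRIORITY set) or normal transmit queue
 * for the ring, and the node is linked onto the ring's circular
 * service queue if it is not already there; the base node is always
 * placed at the head of that queue.  If the node is inactive or the
 * packet is marked for abort, the packet is completed (or the bare
 * IOCB freed) immediately instead.  'lock' selects whether this
 * routine acquires the RINGTX lock itself or the caller already holds
 * it.  The ring timeout is rearmed before returning.
 */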
2684 extern void
2685 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2686 {
2687 	emlxs_hba_t *hba;
2688 	emlxs_port_t *port;
2689 	uint32_t ringno;
2690 	NODELIST *nlp;
2691 	RING *rp;
2692 	emlxs_buf_t *sbp;
2693 
2694 	port = (emlxs_port_t *)iocbq->port;
2695 	hba = HBA;
2696 	rp = (RING *)iocbq->ring;
2697 	nlp = (NODELIST *)iocbq->node;
2698 	ringno = rp->ringno;
2699 	sbp = (emlxs_buf_t *)iocbq->sbp;
2700 
2701 	if (nlp == NULL) {
2702 		/* Set node to base node by default */
2703 		nlp = &port->node_base;
2704 
2705 		iocbq->node = (void *)nlp;
2706 
2707 		if (sbp) {
2708 			sbp->node = (void *)nlp;
2709 		}
2710 	}
2711 
2712 	if (lock) {
2713 		mutex_enter(&EMLXS_RINGTX_LOCK);
2714 	}
2715 
2716 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2717 		if (sbp) {
2718 			mutex_enter(&sbp->mtx);
2719 
2720 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2721 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2722 				hba->ring_tx_count[ringno]--;
2723 			}
2724 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2725 
2726 			mutex_exit(&sbp->mtx);
2727 
2728 			/* Free the ulpIoTag and the bmp */
2729 			(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
2730 
2731 			if (lock) {
2732 				mutex_exit(&EMLXS_RINGTX_LOCK);
2733 			}
2734 
2735 			if (hba->state >= FC_LINK_UP) {
2736 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2737 				    IOERR_ABORT_REQUESTED, 1);
2738 			} else {
2739 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2740 				    IOERR_LINK_DOWN, 1);
2741 			}
2742 			return;
2743 		} else {
2744 			if (lock) {
2745 				mutex_exit(&EMLXS_RINGTX_LOCK);
2746 			}
2747 
2748 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2749 		}
2750 
2751 		return;
2752 	}
2753 
2754 	if (sbp) {
2755 
2756 		mutex_enter(&sbp->mtx);
2757 
2758 		if (sbp->pkt_flags &
2759 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
2760 			mutex_exit(&sbp->mtx);
2761 			if (lock) {
2762 				mutex_exit(&EMLXS_RINGTX_LOCK);
2763 			}
2764 			return;
2765 		}
2766 
2767 		sbp->pkt_flags |= PACKET_IN_TXQ;
2768 		hba->ring_tx_count[ringno]++;
2769 
2770 		mutex_exit(&sbp->mtx);
2771 	}
2772 
2773 
2774 	/* Check iocbq priority */
2775 	if (iocbq->flag & IOCB_PRIORITY) {
2776 		/* Add the iocb to the bottom of the node's ptx queue */
2777 		if (nlp->nlp_ptx[ringno].q_first) {
2778 			((IOCBQ *)nlp->nlp_ptx[ringno].q_last)->next = iocbq;
2779 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2780 			nlp->nlp_ptx[ringno].q_cnt++;
2781 		} else {
2782 			nlp->nlp_ptx[ringno].q_first = (uint8_t *)iocbq;
2783 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2784 			nlp->nlp_ptx[ringno].q_cnt = 1;
2785 		}
2786 
2787 		iocbq->next = NULL;
2788 	} else {	/* Normal priority */
2789 
2790 
2791 		/* Add the iocb to the bottom of the node's tx queue */
2792 		if (nlp->nlp_tx[ringno].q_first) {
2793 			((IOCBQ *)nlp->nlp_tx[ringno].q_last)->next = iocbq;
2794 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2795 			nlp->nlp_tx[ringno].q_cnt++;
2796 		} else {
2797 			nlp->nlp_tx[ringno].q_first = (uint8_t *)iocbq;
2798 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2799 			nlp->nlp_tx[ringno].q_cnt = 1;
2800 		}
2801 
2802 		iocbq->next = NULL;
2803 	}
2804 
2805 
2806 	/*
2807 	 * Check if the node is not already on the ring queue and
2808 	 * (is not closed or this is a priority request)
2809 	 */
2810 	if (!nlp->nlp_next[ringno] && (!(nlp->nlp_flag[ringno] & NLP_CLOSED) ||
2811 	    (iocbq->flag & IOCB_PRIORITY))) {
2812 		/* If so, then add it to the ring queue */
2813 		if (rp->nodeq.q_first) {
2814 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
2815 			    (uint8_t *)nlp;
2816 			nlp->nlp_next[ringno] = rp->nodeq.q_first;
2817 
2818 			/*
2819 			 * If this is not the base node then add it
2820 			 * to the tail
2821 			 */
2822 			if (!nlp->nlp_base) {
2823 				rp->nodeq.q_last = (uint8_t *)nlp;
2824 			} else {	/* Otherwise, add it to the head */
2825 
2826 				/* The command node always gets priority */
2827 				rp->nodeq.q_first = (uint8_t *)nlp;
2828 			}
2829 
2830 			rp->nodeq.q_cnt++;
2831 		} else {
2832 			rp->nodeq.q_first = (uint8_t *)nlp;
2833 			rp->nodeq.q_last = (uint8_t *)nlp;
2834 			nlp->nlp_next[ringno] = nlp;
2835 			rp->nodeq.q_cnt = 1;
2836 		}
2837 	}
2838 
2839 	HBASTATS.IocbTxPut[ringno]++;
2840 
2841 	/* Adjust the ring timeout timer */
2842 	rp->timeout = hba->timer_tics + 5;
2843 
2844 	if (lock) {
2845 		mutex_exit(&EMLXS_RINGTX_LOCK);
2846 	}
2847 
2848 	return;
2849 
2850 }  /* emlxs_tx_put() */
2851 
2852 
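/*
 * emlxs_tx_get
 *
 * Dequeue the next IOCB to transmit on this ring.  The node at the
 * head of the ring's service queue is serviced first from its priority
 * queue, then from its normal queue (unless the node is closed).  The
 * service queue is rotated so nodes are handled round-robin, except
 * that the base node is drained completely before moving on; nodes
 * with nothing left to send are removed from the queue.  The packet's
 * PACKET_IN_TXQ flag is cleared before the IOCB is returned, and NULL
 * is returned when nothing is queued.
 *
 * Illustrative sketch only (the real callers are elsewhere in the
 * driver), showing how the put/get pair is intended to be used:
 *
 *	emlxs_tx_put(iocbq, 1);
 *	...
 *	while ((iocbq = emlxs_tx_get(rp, 1)) != NULL) {
 *		(issue iocbq to the adapter)
 *	}
 */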
2853 extern IOCBQ *
2854 emlxs_tx_get(RING *rp, uint32_t lock)
2855 {
2856 	emlxs_hba_t *hba;
2857 	uint32_t ringno;
2858 	IOCBQ *iocbq;
2859 	NODELIST *nlp;
2860 	emlxs_buf_t *sbp;
2861 
2862 	hba = rp->hba;
2863 	ringno = rp->ringno;
2864 
2865 	if (lock) {
2866 		mutex_enter(&EMLXS_RINGTX_LOCK);
2867 	}
2868 
2869 begin:
2870 
2871 	iocbq = NULL;
2872 
2873 	/* Check if a node needs servicing */
2874 	if (rp->nodeq.q_first) {
2875 		nlp = (NODELIST *)rp->nodeq.q_first;
2876 
2877 		/* Get next iocb from node's priority queue */
2878 
2879 		if (nlp->nlp_ptx[ringno].q_first) {
2880 			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;
2881 
2882 			/* Check if this is last entry */
2883 			if (nlp->nlp_ptx[ringno].q_last == (void *)iocbq) {
2884 				nlp->nlp_ptx[ringno].q_first = NULL;
2885 				nlp->nlp_ptx[ringno].q_last = NULL;
2886 				nlp->nlp_ptx[ringno].q_cnt = 0;
2887 			} else {
2888 				/* Remove iocb from head */
2889 				nlp->nlp_ptx[ringno].q_first =
2890 				    (void *)iocbq->next;
2891 				nlp->nlp_ptx[ringno].q_cnt--;
2892 			}
2893 
2894 			iocbq->next = NULL;
2895 		}
2896 
2897 		/* Get next iocb from node tx queue if node not closed */
2898 		else if (nlp->nlp_tx[ringno].q_first &&
2899 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED)) {
2900 			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;
2901 
2902 			/* Check if this is last entry */
2903 			if (nlp->nlp_tx[ringno].q_last == (void *)iocbq) {
2904 				nlp->nlp_tx[ringno].q_first = NULL;
2905 				nlp->nlp_tx[ringno].q_last = NULL;
2906 				nlp->nlp_tx[ringno].q_cnt = 0;
2907 			} else {
2908 				/* Remove iocb from head */
2909 				nlp->nlp_tx[ringno].q_first =
2910 				    (void *)iocbq->next;
2911 				nlp->nlp_tx[ringno].q_cnt--;
2912 			}
2913 
2914 			iocbq->next = NULL;
2915 		}
2916 
2917 		/* Now deal with node itself */
2918 
2919 		/* Check if node still needs servicing */
2920 		if ((nlp->nlp_ptx[ringno].q_first) ||
2921 		    (nlp->nlp_tx[ringno].q_first &&
2922 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
2923 
2924 			/*
2925 			 * If this is the base node, then don't shift the
2926 			 * pointers. We want to drain the base node before
2927 			 * moving on
2928 			 */
2929 			if (!nlp->nlp_base) {
2930 				/*
2931 				 * Just shift ring queue pointers to next
2932 				 * node
2933 				 */
2934 				rp->nodeq.q_last = (void *)nlp;
2935 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2936 			}
2937 		} else {
2938 			/* Remove node from ring queue */
2939 
2940 			/* If this is the last node on list */
2941 			if (rp->nodeq.q_last == (void *)nlp) {
2942 				rp->nodeq.q_last = NULL;
2943 				rp->nodeq.q_first = NULL;
2944 				rp->nodeq.q_cnt = 0;
2945 			} else {
2946 				/* Remove node from head */
2947 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2948 				((NODELIST *)rp->nodeq.q_last)->
2949 				    nlp_next[ringno] = rp->nodeq.q_first;
2950 				rp->nodeq.q_cnt--;
2951 
2952 			}
2953 
2954 			/* Clear node */
2955 			nlp->nlp_next[ringno] = NULL;
2956 		}
2957 
2958 		/*
2959 		 * If no iocbq was found on this node, then the node will
2960 		 * have been removed from the service queue above, so try again.
2961 		 */
2962 		if (!iocbq) {
2963 			goto begin;
2964 		}
2965 
2966 		sbp = (emlxs_buf_t *)iocbq->sbp;
2967 
2968 		if (sbp) {
2969 			/*
2970 			 * Check flags before we enter mutex in case this
2971 			 * has been flushed and destroyed
2972 			 */
2973 			if ((sbp->pkt_flags &
2974 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2975 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2976 				goto begin;
2977 			}
2978 
2979 			mutex_enter(&sbp->mtx);
2980 
2981 			if ((sbp->pkt_flags &
2982 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2983 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2984 				mutex_exit(&sbp->mtx);
2985 				goto begin;
2986 			}
2987 
2988 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
2989 			hba->ring_tx_count[ringno]--;
2990 
2991 			mutex_exit(&sbp->mtx);
2992 		}
2993 	}
2994 
2995 	if (iocbq) {
2996 		HBASTATS.IocbTxGet[ringno]++;
2997 	}
2998 
2999 	/* Adjust the ring timeout timer */
3000 	rp->timeout = (rp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3001 
3002 	if (lock) {
3003 		mutex_exit(&EMLXS_RINGTX_LOCK);
3004 	}
3005 
3006 	return (iocbq);
3007 
3008 }  /* emlxs_tx_get() */
3009 
3010 
3011 
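/*
 * emlxs_chipq_node_flush
 *
 * Abort the IOs a node has outstanding on the chip.  Each ring's
 * fc_table is scanned under the FCTAB lock for packets that belong to
 * this node, are on the chip queue and have not already had their XRI
 * closed; emlxs_sbp_abort_add() builds a CLOSE_XRI_CN IOCB for each
 * one and collects it on a local queue.  The close IOCBs are then
 * queued with emlxs_tx_put() and ring service is triggered for every
 * ring that was touched.  Returns the number of close IOCBs generated.
 */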
3012 extern uint32_t
3013 emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring, NODELIST *ndlp,
3014     emlxs_buf_t *fpkt)
3015 {
3016 	emlxs_hba_t *hba = HBA;
3017 	emlxs_buf_t *sbp;
3018 	IOCBQ *iocbq;
3019 	IOCBQ *next;
3020 	Q abort;
3021 	RING *rp;
3022 	uint32_t ringno;
3023 	uint8_t flag[MAX_RINGS];
3024 	uint32_t iotag;
3025 
3026 	bzero((void *)&abort, sizeof (Q));
3027 	bzero((void *)flag, sizeof (flag));
3028 
3029 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3030 		rp = &hba->ring[ringno];
3031 
3032 		if (ring && rp != ring) {
3033 			continue;
3034 		}
3035 
3036 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3037 
3038 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3039 			sbp = rp->fc_table[iotag];
3040 
3041 			if (sbp && (sbp != STALE_PACKET) &&
3042 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3043 			    (sbp->node == ndlp) &&
3044 			    (sbp->ring == rp) &&
3045 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3046 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3047 				    fpkt);
3048 			}
3049 
3050 		}
3051 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3052 
3053 	}	/* for */
3054 
3055 	/* Now put the iocb's on the tx queue */
3056 	iocbq = (IOCBQ *)abort.q_first;
3057 	while (iocbq) {
3058 		/* Save the next iocbq for now */
3059 		next = (IOCBQ *)iocbq->next;
3060 
3061 		/* Unlink this iocbq */
3062 		iocbq->next = NULL;
3063 
3064 		/* Send this iocbq */
3065 		emlxs_tx_put(iocbq, 1);
3066 
3067 		iocbq = next;
3068 	}
3069 
3070 	/* Now trigger ring service */
3071 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3072 		if (!flag[ringno]) {
3073 			continue;
3074 		}
3075 
3076 		rp = &hba->ring[ringno];
3077 
3078 		emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3079 	}
3080 
3081 	return (abort.q_cnt);
3082 
3083 }  /* emlxs_chipq_node_flush() */
3084 
3085 
3086 /* Flush all IO's left on all iotag lists */
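/*
 * For every ring, walk the fc_table and forcibly complete every packet
 * still registered there.  Each packet is unregistered, stripped of its
 * transmit/chip queue flags and its BPL, given IOSTAT_LOCAL_REJECT /
 * IOERR_LINK_DOWN status in its own IOCB, and chained onto the ring's
 * deferred response list; the ring's interrupt thread is then
 * triggered to run the normal completion path.  At this point all
 * nodes are assumed destroyed (e.g. during adapter shutdown).  Returns
 * the total number of packets completed this way.
 */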
3087 static uint32_t
3088 emlxs_iotag_flush(emlxs_hba_t *hba)
3089 {
3090 	emlxs_port_t *port = &PPORT;
3091 	emlxs_buf_t *sbp;
3092 	IOCBQ *iocbq;
3093 	IOCB *iocb;
3094 	Q abort;
3095 	RING *rp;
3096 	uint32_t ringno;
3097 	uint32_t iotag;
3098 	uint32_t count;
3099 
3100 	count = 0;
3101 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3102 		rp = &hba->ring[ringno];
3103 
3104 		bzero((void *)&abort, sizeof (Q));
3105 
3106 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3107 
3108 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3109 			sbp = rp->fc_table[iotag];
3110 
3111 			if (!sbp || (sbp == STALE_PACKET)) {
3112 				continue;
3113 			}
3114 
3115 			/* Unregister the packet */
3116 			rp->fc_table[iotag] = STALE_PACKET;
3117 			hba->io_count[ringno]--;
3118 			sbp->iotag = 0;
3119 
3120 			/* Clean up the sbp */
3121 			mutex_enter(&sbp->mtx);
3122 
3123 			/* Set IOCB status */
3124 			iocbq = &sbp->iocbq;
3125 			iocb = &iocbq->iocb;
3126 
3127 			iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
3128 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3129 			iocb->ulpLe = 1;
3130 			iocbq->next = NULL;
3131 
3132 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
3133 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
3134 				hba->ring_tx_count[ringno]--;
3135 			}
3136 
3137 			if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3138 				sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3139 			}
3140 
3141 			if (sbp->bmp) {
3142 				(void) emlxs_mem_put(hba, MEM_BPL,
3143 				    (uint8_t *)sbp->bmp);
3144 				sbp->bmp = 0;
3145 			}
3146 
3147 			/* At this point all nodes are assumed destroyed */
3148 			sbp->node = 0;
3149 
3150 			mutex_exit(&sbp->mtx);
3151 
3152 			/* Add this iocb to our local abort Q */
3153 			if (abort.q_first) {
3154 				((IOCBQ *)abort.q_last)->next = iocbq;
3155 				abort.q_last = (uint8_t *)iocbq;
3156 				abort.q_cnt++;
3157 			} else {
3158 				abort.q_first = (uint8_t *)iocbq;
3159 				abort.q_last = (uint8_t *)iocbq;
3160 				abort.q_cnt = 1;
3161 			}
3162 		}
3163 
3164 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3165 
3166 		/* Trigger deferred completion */
3167 		if (abort.q_first) {
3168 			mutex_enter(&rp->rsp_lock);
3169 			if (rp->rsp_head == NULL) {
3170 				rp->rsp_head = (IOCBQ *)abort.q_first;
3171 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3172 			} else {
3173 				rp->rsp_tail->next = (IOCBQ *)abort.q_first;
3174 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3175 			}
3176 			mutex_exit(&rp->rsp_lock);
3177 
3178 			emlxs_thread_trigger2(&rp->intr_thread,
3179 			    emlxs_proc_ring, rp);
3180 
3181 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3182 			    "Forced iotag completion. ring=%d count=%d",
3183 			    ringno, abort.q_cnt);
3184 
3185 			count += abort.q_cnt;
3186 		}
3187 	}
3188 
3189 	return (count);
3190 
3191 }  /* emlxs_iotag_flush() */
3192 
3193 
3194 
3195 /* Checks for IO's on all or a given ring for a given node */
3196 extern uint32_t
3197 emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp)
3198 {
3199 	emlxs_hba_t *hba = HBA;
3200 	emlxs_buf_t *sbp;
3201 	RING *rp;
3202 	uint32_t ringno;
3203 	uint32_t count;
3204 	uint32_t iotag;
3205 
3206 	count = 0;
3207 
3208 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3209 		rp = &hba->ring[ringno];
3210 
3211 		if (ring && rp != ring) {
3212 			continue;
3213 		}
3214 
3215 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3216 
3217 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3218 			sbp = rp->fc_table[iotag];
3219 
3220 			if (sbp && (sbp != STALE_PACKET) &&
3221 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3222 			    (sbp->node == ndlp) &&
3223 			    (sbp->ring == rp) &&
3224 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3225 				count++;
3226 			}
3227 
3228 		}
3229 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3230 
3231 	}	/* for */
3232 
3233 	return (count);
3234 
3235 }  /* emlxs_chipq_node_check() */
3236 
3237 
3238 
3239 /* Flush all IO's for a given node's lun (FC_FCP_RING only) */
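/*
 * Same as emlxs_chipq_node_flush(), but restricted to the FCP ring and
 * to packets addressed to the given lun of the node.  Close-XRI IOCBs
 * are built under the FCTAB lock, queued with emlxs_tx_put(), and FCP
 * ring service is triggered if anything was queued.  Returns the
 * number of close IOCBs generated.
 */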
3240 extern uint32_t
3241 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
3242     emlxs_buf_t *fpkt)
3243 {
3244 	emlxs_hba_t *hba = HBA;
3245 	emlxs_buf_t *sbp;
3246 	RING *rp;
3247 	IOCBQ *iocbq;
3248 	IOCBQ *next;
3249 	Q abort;
3250 	uint32_t iotag;
3251 	uint8_t flag[MAX_RINGS];
3252 
3253 	bzero((void *)flag, sizeof (flag));
3254 	bzero((void *)&abort, sizeof (Q));
3255 	rp = &hba->ring[FC_FCP_RING];
3256 
3257 	mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3258 	for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3259 		sbp = rp->fc_table[iotag];
3260 
3261 		if (sbp && (sbp != STALE_PACKET) &&
3262 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3263 		    sbp->node == ndlp &&
3264 		    sbp->ring == rp &&
3265 		    sbp->lun == lun &&
3266 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3267 			emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
3268 		}
3269 	}
3270 	mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3271 
3272 	/* Now put the iocb's on the tx queue */
3273 	iocbq = (IOCBQ *)abort.q_first;
3274 	while (iocbq) {
3275 		/* Save the next iocbq for now */
3276 		next = (IOCBQ *)iocbq->next;
3277 
3278 		/* Unlink this iocbq */
3279 		iocbq->next = NULL;
3280 
3281 		/* Send this iocbq */
3282 		emlxs_tx_put(iocbq, 1);
3283 
3284 		iocbq = next;
3285 	}
3286 
3287 	/* Now trigger ring service */
3288 	if (abort.q_cnt) {
3289 		emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3290 	}
3291 
3292 	return (abort.q_cnt);
3293 
3294 }  /* emlxs_chipq_lun_flush() */
3295 
3296 
3297 
3298 /*
3299  * Create an ABORT_XRI_CN IOCB to abort an FCP command already issued.
3300  * This must be called while holding the EMLXS_FCTAB_LOCK.
3301  */
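/*
 * The returned IOCBQ is allocated from the MEM_IOCB pool and marked
 * IOCB_PRIORITY | IOCB_SPECIAL.  Its IoTag is drawn from the ring's
 * special abort range (fc_abort_iotag, kept at or above max_iotag), so
 * it cannot collide with a normal command IoTag.  Returns NULL if no
 * IOCB buffer is available; the caller is responsible for issuing it.
 */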
3302 extern IOCBQ *
3303 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3304     uint16_t iotag, RING *rp, uint8_t class, int32_t flag)
3305 {
3306 	emlxs_hba_t *hba = HBA;
3307 	IOCBQ *iocbq;
3308 	IOCB *iocb;
3309 	uint16_t abort_iotag;
3310 
3311 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3312 		return (NULL);
3313 	}
3314 
3315 	iocbq->ring = (void *)rp;
3316 	iocbq->port = (void *)port;
3317 	iocbq->node = (void *)ndlp;
3318 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3319 	iocb = &iocbq->iocb;
3320 
3321 	/*
3322 	 * set up an iotag using special Abort iotags
3323 	 */
3324 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3325 		rp->fc_abort_iotag = rp->max_iotag;
3326 	}
3327 
3328 	abort_iotag = rp->fc_abort_iotag++;
3329 
3330 
3331 	iocb->ulpIoTag = abort_iotag;
3332 	iocb->un.acxri.abortType = flag;
3333 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3334 	iocb->un.acxri.abortIoTag = iotag;
3335 	iocb->ulpLe = 1;
3336 	iocb->ulpClass = class;
3337 	iocb->ulpCommand = CMD_ABORT_XRI_CN;
3338 	iocb->ulpOwner = OWN_CHIP;
3339 
3340 	return (iocbq);
3341 
3342 }  /* emlxs_create_abort_xri_cn() */
3343 
3344 
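/*
 * emlxs_create_abort_xri_cx
 *
 * Build an ABORT_XRI_CX IOCB.  Unlike emlxs_create_abort_xri_cn(),
 * which names the IO to abort by its IoTag, this variant aborts by
 * exchange id: the target exchange is carried in ulpContext (xid).
 * Returns NULL if no IOCB buffer is available.
 */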
3345 extern IOCBQ *
3346 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3347     RING *rp, uint8_t class, int32_t flag)
3348 {
3349 	emlxs_hba_t *hba = HBA;
3350 	IOCBQ *iocbq;
3351 	IOCB *iocb;
3352 	uint16_t abort_iotag;
3353 
3354 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3355 		return (NULL);
3356 	}
3357 
3358 	iocbq->ring = (void *)rp;
3359 	iocbq->port = (void *)port;
3360 	iocbq->node = (void *)ndlp;
3361 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3362 	iocb = &iocbq->iocb;
3363 
3364 	/*
3365 	 * set up an iotag using special Abort iotags
3366 	 */
3367 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3368 		rp->fc_abort_iotag = rp->max_iotag;
3369 	}
3370 
3371 	abort_iotag = rp->fc_abort_iotag++;
3372 
3373 	iocb->ulpContext = xid;
3374 	iocb->ulpIoTag = abort_iotag;
3375 	iocb->un.acxri.abortType = flag;
3376 	iocb->ulpLe = 1;
3377 	iocb->ulpClass = class;
3378 	iocb->ulpCommand = CMD_ABORT_XRI_CX;
3379 	iocb->ulpOwner = OWN_CHIP;
3380 
3381 	return (iocbq);
3382 
3383 }  /* emlxs_create_abort_xri_cx() */
3384 
3385 
3386 
3387 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3388 extern IOCBQ *
3389 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3390     uint16_t iotag, RING *rp)
3391 {
3392 	emlxs_hba_t *hba = HBA;
3393 	IOCBQ *iocbq;
3394 	IOCB *iocb;
3395 	uint16_t abort_iotag;
3396 
3397 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3398 		return (NULL);
3399 	}
3400 
3401 	iocbq->ring = (void *)rp;
3402 	iocbq->port = (void *)port;
3403 	iocbq->node = (void *)ndlp;
3404 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3405 	iocb = &iocbq->iocb;
3406 
3407 	/*
3408 	 * set up an iotag using special Abort iotags
3409 	 */
3410 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3411 		rp->fc_abort_iotag = rp->max_iotag;
3412 	}
3413 
3414 	abort_iotag = rp->fc_abort_iotag++;
3415 
3416 	iocb->ulpIoTag = abort_iotag;
3417 	iocb->un.acxri.abortType = 0;
3418 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3419 	iocb->un.acxri.abortIoTag = iotag;
3420 	iocb->ulpLe = 1;
3421 	iocb->ulpClass = 0;
3422 	iocb->ulpCommand = CMD_CLOSE_XRI_CN;
3423 	iocb->ulpOwner = OWN_CHIP;
3424 
3425 	return (iocbq);
3426 
3427 }  /* emlxs_create_close_xri_cn() */
3428 
3429 
3430 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3431 extern IOCBQ *
3432 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3433     RING *rp)
3434 {
3435 	emlxs_hba_t *hba = HBA;
3436 	IOCBQ *iocbq;
3437 	IOCB *iocb;
3438 	uint16_t abort_iotag;
3439 
3440 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3441 		return (NULL);
3442 	}
3443 
3444 	iocbq->ring = (void *)rp;
3445 	iocbq->port = (void *)port;
3446 	iocbq->node = (void *)ndlp;
3447 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3448 	iocb = &iocbq->iocb;
3449 
3450 	/*
3451 	 * set up an iotag using special Abort iotags
3452 	 */
3453 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3454 		rp->fc_abort_iotag = rp->max_iotag;
3455 	}
3456 
3457 	abort_iotag = rp->fc_abort_iotag++;
3458 
3459 	iocb->ulpContext = xid;
3460 	iocb->ulpIoTag = abort_iotag;
3461 	iocb->ulpLe = 1;
3462 	iocb->ulpClass = 0;
3463 	iocb->ulpCommand = CMD_CLOSE_XRI_CX;
3464 	iocb->ulpOwner = OWN_CHIP;
3465 
3466 	return (iocbq);
3467 
3468 }  /* emlxs_create_close_xri_cx() */
3469 
3470 
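/*
 * emlxs_abort_ct_exchange
 *
 * Abort an exchange on the CT ring identified by its rx id.  When the
 * link is up an ABORT_XRI_CX (ABTS) IOCB is used; when the link is
 * down the exchange is simply closed with CLOSE_XRI_CX.  The IOCB, if
 * one could be allocated, is issued immediately.
 */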
3471 void
3472 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
3473 {
3474 	RING *rp;
3475 	IOCBQ *iocbq;
3476 
3477 	rp = &hba->ring[FC_CT_RING];
3478 
3479 	/* Create the abort IOCB */
3480 	if (hba->state >= FC_LINK_UP) {
3481 		iocbq =
3482 		    emlxs_create_abort_xri_cx(port, NULL, rxid, rp, CLASS3,
3483 		    ABORT_TYPE_ABTS);
3484 	} else {
3485 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, rp);
3486 	}
3487 	if (iocbq) {
3488 		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
3489 	}
3490 }
3491 
3492 
3493 /* This must be called while holding the EMLXS_FCTAB_LOCK */
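/*
 * Build a CLOSE_XRI_CN IOCB for the given packet and append it to the
 * caller's local abort queue, so the FCTAB lock is not held while the
 * closes are issued.  The packet is marked PACKET_IN_FLUSH and
 * PACKET_XRI_CLOSED, given roughly a 10-tick abort timeout, counted
 * against fpkt->flush_count (if fpkt is given and not already set),
 * and the packet's ring is flagged so the caller can trigger ring
 * service afterwards.
 */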
3494 static void
3495 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
3496     uint8_t *flag, emlxs_buf_t *fpkt)
3497 {
3498 	emlxs_hba_t *hba = HBA;
3499 	IOCBQ *iocbq;
3500 	RING *rp;
3501 	NODELIST *ndlp;
3502 
3503 	rp = (RING *)sbp->ring;
3504 	ndlp = sbp->node;
3505 
3506 	/* Create the close XRI IOCB */
3507 	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp);
3508 
3509 	/*
3510 	 * Add this iocb to our local abort Q
3511 	 * This way we don't hold the CHIPQ lock too long
3512 	 */
3513 	if (iocbq) {
3514 		if (abort->q_first) {
3515 			((IOCBQ *)abort->q_last)->next = iocbq;
3516 			abort->q_last = (uint8_t *)iocbq;
3517 			abort->q_cnt++;
3518 		} else {
3519 			abort->q_first = (uint8_t *)iocbq;
3520 			abort->q_last = (uint8_t *)iocbq;
3521 			abort->q_cnt = 1;
3522 		}
3523 		iocbq->next = NULL;
3524 	}
3525 
3526 	/* set the flags */
3527 	mutex_enter(&sbp->mtx);
3528 
3529 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
3530 	sbp->ticks = hba->timer_tics + 10;
3531 	sbp->abort_attempts++;
3532 
3533 	flag[rp->ringno] = 1;
3534 
3535 	/*
3536 	 * If the fpkt is already set, then we will leave it alone
3537 	 * This ensures that this pkt is only accounted for on one
3538 	 * fpkt->flush_count
3539 	 */
3540 	if (!sbp->fpkt && fpkt) {
3541 		mutex_enter(&fpkt->mtx);
3542 		sbp->fpkt = fpkt;
3543 		fpkt->flush_count++;
3544 		mutex_exit(&fpkt->mtx);
3545 	}
3546 
3547 	mutex_exit(&sbp->mtx);
3548 
3549 	return;
3550 
3551 }  /* emlxs_sbp_abort_add() */
3552