1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 #include <emlxs.h>
28 
29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 EMLXS_MSG_DEF(EMLXS_FCP_C);
31 
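/*
 * Note: this macro references the caller's loop index 'i' (via
 * icmd->un.cont64[i]), so it may only be used inside loops that define 'i'.
 * Its expansion also supplies its own trailing semicolon.
 */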
32 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
33 	getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
34 
35 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
36     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
37 static uint32_t	emlxs_iotag_flush(emlxs_hba_t *hba);
38 
39 /*
40  * This routine copies 32-bit words from src to dest, applying PCIMEM_LONG
41  * to each word. cnt is a byte count, a multiple of sizeof (uint32_t).
42  */
43 extern void
44 emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
45 {
46 	uint32_t ldata;
47 	int32_t i;
48 
49 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
50 		ldata = *src++;
51 		ldata = PCIMEM_LONG(ldata);
52 		*dest++ = ldata;
53 	}
54 }  /* emlxs_pcimem_bcopy */
55 
56 
57 /*
58  * This routine copies 32-bit words from src to dest, byte-swapping each
59  * word via SWAP_DATA32. cnt is a byte count, a multiple of sizeof (uint32_t).
60  */
61 extern void
62 emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
63 {
64 	uint32_t ldata;
65 	int32_t i;
66 
67 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
68 		ldata = *src++;
69 		ldata = SWAP_DATA32(ldata);
70 		*dest++ = ldata;
71 	}
72 }  /* emlxs_swap_bcopy */
73 
74 
75 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
76 #define	SCSI_INQUIRY			0x12
77 #define	SCSI_RX_DIAG    		0x1C
78 
79 
80 /*
81  *  emlxs_handle_fcp_event
82  *
83  *  Description: Process an FCP Rsp Ring completion
84  *
85  */
86 /* ARGSUSED */
87 extern void
88 emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
89 {
90 	emlxs_port_t *port = &PPORT;
91 	IOCB *cmd;
92 	emlxs_buf_t *sbp;
93 	fc_packet_t *pkt = NULL;
94 #ifdef SAN_DIAG_SUPPORT
95 	NODELIST *ndlp;
96 #endif
97 	uint32_t iostat;
98 	uint8_t localstat;
99 	fcp_rsp_t *rsp;
100 	uint32_t rsp_data_resid;
101 	uint32_t check_underrun;
102 	uint8_t asc;
103 	uint8_t ascq;
104 	uint8_t scsi_status;
105 	uint8_t sense;
106 	uint32_t did;
107 	uint32_t fix_it;
108 	uint8_t *scsi_cmd;
109 	uint8_t scsi_opcode;
110 	uint16_t scsi_dl;
111 	uint32_t data_rx;
112 
113 	cmd = &iocbq->iocb;
114 
115 	/* Initialize the status */
116 	iostat = cmd->ulpStatus;
117 	localstat = 0;
118 	scsi_status = 0;
119 	asc = 0;
120 	ascq = 0;
121 	sense = 0;
122 	check_underrun = 0;
123 	fix_it = 0;
124 
125 	HBASTATS.FcpEvent++;
126 
127 	sbp = (emlxs_buf_t *)iocbq->sbp;
128 
129 	if (!sbp) {
130 		/* completion with missing xmit command */
131 		HBASTATS.FcpStray++;
132 
133 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
134 		    "cmd=%x iotag=%x", cmd->ulpCommand, cmd->ulpIoTag);
135 
136 		return;
137 	}
138 
139 	HBASTATS.FcpCompleted++;
140 
141 #ifdef SAN_DIAG_SUPPORT
142 	emlxs_update_sd_bucket(sbp);
143 #endif /* SAN_DIAG_SUPPORT */
144 
145 	pkt = PRIV2PKT(sbp);
146 
147 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
148 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
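	/* The CDB starts at offset 12 of the FCP_CMND payload */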
149 	scsi_opcode = scsi_cmd[12];
150 	data_rx = 0;
151 
152 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
153 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
154 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
155 		    DDI_DMA_SYNC_FORKERNEL);
156 
157 #ifdef TEST_SUPPORT
158 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
159 		    (pkt->pkt_datalen >= 512)) {
160 			hba->underrun_counter--;
161 			iostat = IOSTAT_FCP_RSP_ERROR;
162 
163 			/* Report 512 bytes missing by adapter */
164 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
165 
166 			/* Corrupt 512 bytes of Data buffer */
167 			bzero((uint8_t *)pkt->pkt_data, 512);
168 
169 			/* Set FCP response to STATUS_GOOD */
170 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
171 		}
172 #endif /* TEST_SUPPORT */
173 	}
174 
175 	/* Process the pkt */
176 	mutex_enter(&sbp->mtx);
177 
178 	/* Check for immediate return (fast path completion to the ULP) */
179 	if ((iostat == IOSTAT_SUCCESS) &&
180 	    (pkt->pkt_comp) &&
181 	    !(sbp->pkt_flags &
182 	    (PACKET_RETURNED | PACKET_COMPLETED |
183 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
184 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
185 	    PACKET_IN_ABORT | PACKET_POLLED))) {
186 		HBASTATS.FcpGood++;
187 
188 		sbp->pkt_flags |=
189 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
190 		    PACKET_COMPLETED | PACKET_RETURNED);
191 		mutex_exit(&sbp->mtx);
192 
193 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
194 		emlxs_unswap_pkt(sbp);
195 #endif /* EMLXS_MODREV2X */
196 
197 		(*pkt->pkt_comp) (pkt);
198 
199 		return;
200 	}
201 
202 	/*
203 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
204 	 * is reported.
205 	 */
206 
207 	/* Check if a response buffer was provided */
208 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
209 		emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
210 		    DDI_DMA_SYNC_FORKERNEL);
211 
212 		/* Get the response buffer pointer */
213 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
214 
215 		/* Set the valid response flag */
216 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
217 
218 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
219 
220 #ifdef SAN_DIAG_SUPPORT
221 		ndlp = (NODELIST *)iocbq->node;
222 		if (scsi_status == SCSI_STAT_QUE_FULL) {
223 			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
224 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
225 		} else if (scsi_status == SCSI_STAT_BUSY) {
226 			emlxs_log_sd_scsi_event(port,
227 			    SD_SCSI_SUBCATEGORY_DEVBSY,
228 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
229 		}
230 #endif
231 
232 		/*
233 		 * Convert a task abort to a check condition with no data
234 		 * transferred. We saw a data corruption when Solaris received
235 		 * a Task Abort from a tape.
236 		 */
237 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
238 			EMLXS_MSGF(EMLXS_CONTEXT,
239 			    &emlxs_fcp_completion_error_msg,
240 			    "Task Abort. "
241 			    "Fixed.did=0x%06x sbp=%p cmd=%02x dl=%d",
242 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
243 
244 			rsp->fcp_u.fcp_status.scsi_status =
245 			    SCSI_STAT_CHECK_COND;
246 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
247 			rsp->fcp_u.fcp_status.sense_len_set = 0;
248 			rsp->fcp_u.fcp_status.resid_over = 0;
249 
250 			if (pkt->pkt_datalen) {
251 				rsp->fcp_u.fcp_status.resid_under = 1;
252 				rsp->fcp_resid =
253 				    SWAP_DATA32(pkt->pkt_datalen);
254 			} else {
255 				rsp->fcp_u.fcp_status.resid_under = 0;
256 				rsp->fcp_resid = 0;
257 			}
258 
259 			scsi_status = SCSI_STAT_CHECK_COND;
260 		}
261 
262 		/*
263 		 * We only need to check underrun if data could
264 		 * have been sent
265 		 */
266 
267 		/* Always check underrun if status is good */
268 		if (scsi_status == SCSI_STAT_GOOD) {
269 			check_underrun = 1;
270 		}
271 		/* Check the sense codes if this is a check condition */
272 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
273 			check_underrun = 1;
274 
275 			/* Check if sense data was provided */
276 			if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
277 				sense = *((uint8_t *)rsp + 32 + 2);
278 				asc = *((uint8_t *)rsp + 32 + 12);
279 				ascq = *((uint8_t *)rsp + 32 + 13);
280 			}
281 
282 #ifdef SAN_DIAG_SUPPORT
283 			emlxs_log_sd_scsi_check_event(port,
284 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
285 			    scsi_opcode, sense, asc, ascq);
286 #endif
287 		}
288 		/* Status is not good and this is not a check condition */
289 		/* No data should have been sent */
290 		else {
291 			check_underrun = 0;
292 		}
293 
294 		/* Get the residual underrun count reported by the SCSI reply */
295 		rsp_data_resid = (pkt->pkt_datalen &&
296 		    rsp->fcp_u.fcp_status.resid_under) ? SWAP_DATA32(rsp->
297 		    fcp_resid) : 0;
298 
299 		/* Set the pkt resp_resid field */
300 		pkt->pkt_resp_resid = 0;
301 
302 		/* Set the pkt data_resid field */
303 		if (pkt->pkt_datalen &&
304 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
305 			/*
306 			 * Get the residual underrun count reported by
307 			 * our adapter
308 			 */
309 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
310 
311 #ifdef SAN_DIAG_SUPPORT
312 			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
313 				emlxs_log_sd_fc_rdchk_event(port,
314 				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
315 				    scsi_opcode, pkt->pkt_data_resid);
316 			}
317 #endif
318 
319 			/* Get the actual amount of data transferred */
320 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
321 
322 			/*
323 			 * If the residual being reported by the adapter is
324 			 * greater than the residual being reported in the
325 			 * reply, then we have a true underrun.
326 			 */
327 			if (check_underrun &&
328 			    (pkt->pkt_data_resid > rsp_data_resid)) {
329 				switch (scsi_opcode) {
330 				case SCSI_INQUIRY:
331 					scsi_dl = scsi_cmd[16];
332 					break;
333 
334 				case SCSI_RX_DIAG:
335 					scsi_dl =
336 					    (scsi_cmd[15] * 0x100) +
337 					    scsi_cmd[16];
338 					break;
339 
340 				default:
341 					scsi_dl = pkt->pkt_datalen;
342 				}
343 
344 #ifdef FCP_UNDERRUN_PATCH1
345 				/*
346 				 * If status is not good and no data was
347 				 * actually transferred, then we must fix
348 				 * the issue
349 				 */
350 				if ((scsi_status != SCSI_STAT_GOOD) &&
351 				    (data_rx == 0)) {
352 					fix_it = 1;
353 
354 					EMLXS_MSGF(EMLXS_CONTEXT,
355 					    &emlxs_fcp_completion_error_msg,
356 					    "Underrun(1). Fixed. "
357 					    "did=0x%06x sbp=%p cmd=%02x "
358 					    "dl=%d,%d rx=%d rsp=%d",
359 					    did, sbp, scsi_opcode,
360 					    pkt->pkt_datalen, scsi_dl,
361 					    (pkt->pkt_datalen -
362 					    cmd->un.fcpi.fcpi_parm),
363 					    rsp_data_resid);
364 
365 				}
366 #endif /* FCP_UNDERRUN_PATCH1 */
367 
368 
369 #ifdef FCP_UNDERRUN_PATCH2
370 				if ((scsi_status == SCSI_STAT_GOOD)) {
371 					emlxs_msg_t	*msg;
372 
373 					msg = &emlxs_fcp_completion_error_msg;
374 					/*
375 					 * If status is good, this is an
376 					 * inquiry request, and the amount of
377 					 * data requested <= data received,
378 					 * then we must fix the issue.
379 					 */
383 
384 					if ((scsi_opcode == SCSI_INQUIRY) &&
385 					    (pkt->pkt_datalen >= data_rx) &&
386 					    (scsi_dl <= data_rx)) {
387 						fix_it = 1;
388 
389 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
390 						    "Underrun(2). Fixed. "
391 						    "did=0x%06x sbp=%p "
392 						    "cmd=%02x dl=%d,%d "
393 						    "rx=%d rsp=%d",
394 						    did, sbp, scsi_opcode,
395 						    pkt->pkt_datalen, scsi_dl,
396 						    data_rx, rsp_data_resid);
397 
398 					}
399 
400 					/*
401 					 * If status is good and this is an
402 					 * inquiry request and the amount of
403 					 * data requested >= 128 bytes, but
404 					 * only 128 bytes were received,
405 					 * then we must fix the issue.
406 					 */
407 					else if ((scsi_opcode ==
408 					    SCSI_INQUIRY) &&
409 					    (pkt->pkt_datalen >= 128) &&
410 					    (scsi_dl >= 128) &&
411 					    (data_rx == 128)) {
412 						fix_it = 1;
413 
414 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
415 						    "Underrun(3). Fixed. "
416 						    "did=0x%06x sbp=%p "
417 						    "cmd=%02x dl=%d,%d "
418 						    "rx=%d rsp=%d",
419 						    did, sbp, scsi_opcode,
420 						    pkt->pkt_datalen, scsi_dl,
421 						    data_rx, rsp_data_resid);
422 
423 					}
424 
425 				}
426 #endif /* FCP_UNDERRUN_PATCH2 */
427 
428 				/*
429 				 * Check if SCSI response payload should be
430 				 * fixed or if a DATA_UNDERRUN should be
431 				 * reported
432 				 */
433 				if (fix_it) {
434 					/*
435 					 * Fix the SCSI response payload itself
436 					 */
437 					rsp->fcp_u.fcp_status.resid_under = 1;
438 					rsp->fcp_resid =
439 					    SWAP_DATA32(pkt->pkt_data_resid);
440 				} else {
441 					/*
442 					 * Change the status from
443 					 * IOSTAT_FCP_RSP_ERROR to
444 					 * IOSTAT_DATA_UNDERRUN
445 					 */
446 					iostat = IOSTAT_DATA_UNDERRUN;
447 					pkt->pkt_data_resid =
448 					    pkt->pkt_datalen;
449 				}
450 			}
451 
452 			/*
453 			 * If the residual being reported by the adapter is
454 			 * less than the residual being reported in the reply,
455 			 * then we have a true overrun. Since we don't know
456 			 * where the extra data came from or went to then we
457 			 * cannot trust anything we received
458 			 */
459 			else if (rsp_data_resid > pkt->pkt_data_resid) {
460 				/*
461 				 * Change the status from
462 				 * IOSTAT_FCP_RSP_ERROR to
463 				 * IOSTAT_DATA_OVERRUN
464 				 */
465 				iostat = IOSTAT_DATA_OVERRUN;
466 				pkt->pkt_data_resid = pkt->pkt_datalen;
467 			}
468 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
469 
470 			/* Report whatever the target reported */
471 			pkt->pkt_data_resid = rsp_data_resid;
472 		}
473 	}
474 
475 	/*
476 	 * If pkt is tagged for timeout then set the return codes
477 	 * appropriately
478 	 */
479 	if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
480 		iostat = IOSTAT_LOCAL_REJECT;
481 		localstat = IOERR_ABORT_TIMEOUT;
482 		goto done;
483 	}
484 
485 	/* If pkt is tagged for abort then set the return codes appropriately */
486 	if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
487 		iostat = IOSTAT_LOCAL_REJECT;
488 		localstat = IOERR_ABORT_REQUESTED;
489 		goto done;
490 	}
491 
492 	/* Print completion message */
493 	switch (iostat) {
494 	case IOSTAT_SUCCESS:
495 		/* Build SCSI GOOD status */
496 		if (pkt->pkt_rsplen) {
497 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
498 		}
499 		break;
500 
501 	case IOSTAT_FCP_RSP_ERROR:
502 		break;
503 
504 	case IOSTAT_REMOTE_STOP:
505 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
506 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
507 		    scsi_opcode);
508 		break;
509 
510 	case IOSTAT_LOCAL_REJECT:
511 		localstat = cmd->un.grsp.perr.statLocalError;
512 
513 		switch (localstat) {
514 		case IOERR_SEQUENCE_TIMEOUT:
515 			EMLXS_MSGF(EMLXS_CONTEXT,
516 			    &emlxs_fcp_completion_error_msg,
517 			    "Local reject. "
518 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
519 			    emlxs_error_xlate(localstat), did, sbp,
520 			    scsi_opcode, pkt->pkt_timeout);
521 			break;
522 
523 		default:
524 			EMLXS_MSGF(EMLXS_CONTEXT,
525 			    &emlxs_fcp_completion_error_msg,
526 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
527 			    emlxs_error_xlate(localstat), did, sbp,
528 			    scsi_opcode);
529 		}
530 
531 		break;
532 
533 	case IOSTAT_NPORT_RJT:
534 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
535 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
536 		    scsi_opcode);
537 		break;
538 
539 	case IOSTAT_FABRIC_RJT:
540 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
541 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
542 		    scsi_opcode);
543 		break;
544 
545 	case IOSTAT_NPORT_BSY:
546 #ifdef SAN_DIAG_SUPPORT
547 		ndlp = (NODELIST *)iocbq->node;
548 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
549 #endif
550 
551 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
552 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
553 		    scsi_opcode);
554 		break;
555 
556 	case IOSTAT_FABRIC_BSY:
557 #ifdef SAN_DIAG_SUPPORT
558 		ndlp = (NODELIST *)iocbq->node;
559 		emlxs_log_sd_fc_bsy_event(port, NULL);
560 #endif
561 
562 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
563 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
564 		    scsi_opcode);
565 		break;
566 
567 	case IOSTAT_INTERMED_RSP:
568 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
569 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
570 		    sbp, scsi_opcode);
571 		break;
572 
573 	case IOSTAT_LS_RJT:
574 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
575 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
576 		    scsi_opcode);
577 		break;
578 
579 	case IOSTAT_DATA_UNDERRUN:
580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
581 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
582 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
583 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
584 		    rsp_data_resid, scsi_status, sense, asc, ascq);
585 		break;
586 
587 	case IOSTAT_DATA_OVERRUN:
588 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
589 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
590 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
591 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
592 		    rsp_data_resid, scsi_status, sense, asc, ascq);
593 		break;
594 
595 	default:
596 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
597 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
598 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
599 		    scsi_opcode);
600 		break;
601 	}
602 
603 done:
604 
605 	if (iostat == IOSTAT_SUCCESS) {
606 		HBASTATS.FcpGood++;
607 	} else {
608 		HBASTATS.FcpError++;
609 	}
610 
611 	mutex_exit(&sbp->mtx);
612 
613 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
614 
615 	return;
616 
617 }  /* emlxs_handle_fcp_event() */
618 
619 
620 
621 /*
622  *  emlxs_post_buffer
623  *
624  *  This routine posts count buffers to the ring using
625  *  QUE_RING_BUF64_CN commands. At most 2 buffers are
626  *  posted per command.
627  *  Returns the number of buffers NOT posted.
628  */
629 extern int
630 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
631 {
632 	emlxs_port_t *port = &PPORT;
633 	IOCB *icmd;
634 	IOCBQ *iocbq;
635 	MATCHMAP *mp;
636 	uint16_t tag;
637 	uint32_t maxqbuf;
638 	int32_t i;
639 	int32_t j;
640 	uint32_t seg;
641 	uint32_t size;
642 
643 	mp = 0;
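	/* Each QUE_RING_BUF64_CN IOCB carries at most two buffer descriptors */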
644 	maxqbuf = 2;
645 	tag = (uint16_t)cnt;
646 	cnt += rp->fc_missbufcnt;
647 
648 	if (rp->ringno == FC_ELS_RING) {
649 		seg = MEM_BUF;
650 		size = MEM_ELSBUF_SIZE;
651 	} else if (rp->ringno == FC_IP_RING) {
652 		seg = MEM_IPBUF;
653 		size = MEM_IPBUF_SIZE;
654 	} else if (rp->ringno == FC_CT_RING) {
655 		seg = MEM_CTBUF;
656 		size = MEM_CTBUF_SIZE;
657 	}
658 #ifdef SFCT_SUPPORT
659 	else if (rp->ringno == FC_FCT_RING) {
660 		seg = MEM_FCTBUF;
661 		size = MEM_FCTBUF_SIZE;
662 	}
663 #endif /* SFCT_SUPPORT */
664 	else {
665 		return (0);
666 	}
667 
668 	/*
669 	 * While there are buffers to post
670 	 */
671 	while (cnt) {
672 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
673 			rp->fc_missbufcnt = cnt;
674 			return (cnt);
675 		}
676 
677 		iocbq->ring = (void *)rp;
678 		iocbq->port = (void *)port;
679 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
680 
681 		icmd = &iocbq->iocb;
682 
683 		/*
684 		 * Max buffers can be posted per command
685 		 */
686 		for (i = 0; i < maxqbuf; i++) {
687 			if (cnt <= 0)
688 				break;
689 
690 			/* fill in BDEs for command */
691 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
692 				icmd->ulpBdeCount = i;
693 				for (j = 0; j < i; j++) {
694 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
695 					if (mp) {
696 						(void) emlxs_mem_put(hba, seg,
697 						    (uint8_t *)mp);
698 					}
699 				}
700 
701 				rp->fc_missbufcnt = cnt + i;
702 
703 				(void) emlxs_mem_put(hba, MEM_IOCB,
704 				    (uint8_t *)iocbq);
705 
706 				return (cnt + i);
707 			}
708 
709 			/*
710 			 * map that page and save the address pair for lookup
711 			 * later
712 			 */
713 			emlxs_mem_map_vaddr(hba,
714 			    rp,
715 			    mp,
716 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
717 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
718 
719 			icmd->un.cont64[i].tus.f.bdeSize = size;
720 			icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
721 
722 			/*
723 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
724 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
725 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
726 			 *    icmd->un.cont64[i].addrLow, size);
727 			 */
728 
729 			cnt--;
730 		}
731 
732 		icmd->ulpIoTag = tag;
733 		icmd->ulpBdeCount = i;
734 		icmd->ulpLe = 1;
735 		icmd->ulpOwner = OWN_CHIP;
736 		/* used for delimiter between commands */
737 		iocbq->bp = (uint8_t *)mp;
738 
739 		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
740 	}
741 
742 	rp->fc_missbufcnt = 0;
743 
744 	return (0);
745 
746 }  /* emlxs_post_buffer() */
747 
748 
749 extern int
750 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
751 {
752 	emlxs_hba_t *hba = HBA;
753 	emlxs_config_t *cfg;
754 	NODELIST *nlp;
755 	fc_affected_id_t *aid;
756 	uint32_t mask;
757 	uint32_t aff_d_id;
758 	uint32_t linkdown;
759 	uint32_t vlinkdown;
760 	uint32_t action;
761 	int i;
762 	uint32_t unreg_vpi;
763 	uint32_t update;
764 	uint32_t adisc_support;
765 	uint8_t format;
766 
767 	/* Target mode only uses this routine for linkdowns */
768 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
769 		return (0);
770 	}
771 
772 	cfg = &CFG;
773 	aid = (fc_affected_id_t *)&scope;
774 	linkdown = 0;
775 	vlinkdown = 0;
776 	unreg_vpi = 0;
777 	update = 0;
778 
779 	if (!(port->flag & EMLXS_PORT_BOUND)) {
780 		return (0);
781 	}
782 
783 	format = aid->aff_format;
784 
785 	switch (format) {
786 	case 0:	/* Port */
787 		mask = 0x00ffffff;
788 		break;
789 
790 	case 1:	/* Area */
791 		mask = 0x00ffff00;
792 		break;
793 
794 	case 2:	/* Domain */
795 		mask = 0x00ff0000;
796 		break;
797 
798 	case 3:	/* Network */
799 		mask = 0x00000000;
800 		break;
801 
802 #ifdef DHCHAP_SUPPORT
803 	case 0xfe:	/* Virtual link down */
804 		mask = 0x00000000;
805 		vlinkdown = 1;
806 		break;
807 #endif /* DHCHAP_SUPPORT */
808 
809 	case 0xff:	/* link is down */
810 		mask = 0x00000000;
811 		linkdown = 1;
812 		break;
813 
814 	}
815 
816 	aff_d_id = aid->aff_d_id & mask;
817 
818 
819 	/*
820 	 * If link is down then this is a hard shutdown and flush
821 	 * If link not down then this is a soft shutdown and flush
822 	 * (e.g. RSCN)
823 	 */
824 	if (linkdown) {
825 		mutex_enter(&EMLXS_PORT_LOCK);
826 
827 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
828 		port->prev_did = port->did;
829 		port->did = 0;
830 
831 		if (port->ulp_statec != FC_STATE_OFFLINE) {
832 			port->ulp_statec = FC_STATE_OFFLINE;
833 			update = 1;
834 		}
835 
836 		mutex_exit(&EMLXS_PORT_LOCK);
837 
838 		/* Tell ULP about it */
839 		if (update) {
840 			if (port->flag & EMLXS_PORT_BOUND) {
841 				if (port->vpi == 0) {
842 					EMLXS_MSGF(EMLXS_CONTEXT,
843 					    &emlxs_link_down_msg, NULL);
844 				}
845 
846 				if (port->ini_mode) {
847 					port->ulp_statec_cb(port->ulp_handle,
848 					    FC_STATE_OFFLINE);
849 				}
850 #ifdef SFCT_SUPPORT
851 				else if (port->tgt_mode) {
852 					emlxs_fct_link_down(port);
853 				}
854 #endif /* SFCT_SUPPORT */
855 
856 			} else {
857 				if (port->vpi == 0) {
858 					EMLXS_MSGF(EMLXS_CONTEXT,
859 					    &emlxs_link_down_msg, "*");
860 				}
861 			}
862 
863 
864 		}
865 
866 		unreg_vpi = 1;
867 
868 #ifdef DHCHAP_SUPPORT
869 		/* Stop authentication with all nodes */
870 		emlxs_dhc_auth_stop(port, NULL);
871 #endif /* DHCHAP_SUPPORT */
872 
873 		/* Flush the base node */
874 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
875 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
876 
877 		/* Flush any pending ub buffers */
878 		emlxs_ub_flush(port);
879 	}
880 #ifdef DHCHAP_SUPPORT
881 	/* virtual link down */
882 	else if (vlinkdown) {
883 		mutex_enter(&EMLXS_PORT_LOCK);
884 
885 		if (port->ulp_statec != FC_STATE_OFFLINE) {
886 			port->ulp_statec = FC_STATE_OFFLINE;
887 			update = 1;
888 		}
889 
890 		mutex_exit(&EMLXS_PORT_LOCK);
891 
892 		/* Tell ULP about it */
893 		if (update) {
894 			if (port->flag & EMLXS_PORT_BOUND) {
895 				if (port->vpi == 0) {
896 					EMLXS_MSGF(EMLXS_CONTEXT,
897 					    &emlxs_link_down_msg,
898 					    "Switch authentication failed.");
899 				}
900 
901 #ifdef SFCT_SUPPORT
902 				if (port->tgt_mode) {
903 					emlxs_fct_link_down(port);
904 
905 				} else if (port->ini_mode) {
906 					port->ulp_statec_cb(port->ulp_handle,
907 					    FC_STATE_OFFLINE);
908 				}
909 #else
910 				port->ulp_statec_cb(port->ulp_handle,
911 				    FC_STATE_OFFLINE);
912 #endif	/* SFCT_SUPPORT */
913 			} else {
914 				if (port->vpi == 0) {
915 					EMLXS_MSGF(EMLXS_CONTEXT,
916 					    &emlxs_link_down_msg,
917 					    "Switch authentication failed. *");
918 				}
919 			}
920 
921 
922 		}
923 
924 		/* Flush the base node */
925 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
926 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
927 	}
928 #endif /* DHCHAP_SUPPORT */
929 
930 	if (port->tgt_mode) {
931 		goto done;
932 	}
933 
934 	/* Set the node tags */
935 	/* We will process all nodes with this tag */
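	/*
	 * Tagging lets the loops below drop and re-acquire node_rwlock for
	 * each node while still visiting every node exactly once.
	 */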
936 	rw_enter(&port->node_rwlock, RW_READER);
937 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
938 		nlp = port->node_table[i];
939 		while (nlp != NULL) {
940 			nlp->nlp_tag = 1;
941 			nlp = nlp->nlp_list_next;
942 		}
943 	}
944 	rw_exit(&port->node_rwlock);
945 
946 	if (hba->flag & FC_ONLINE_MODE) {
947 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
948 	} else {
949 		adisc_support = 0;
950 	}
951 
952 	/* Check ADISC support level */
953 	switch (adisc_support) {
954 	case 0:	/* No support - Flush all IO to all matching nodes */
955 
956 		for (;;) {
957 			/*
958 			 * We need to hold the locks this way because
959 			 * emlxs_mb_unreg_did and the flush routines enter the
960 			 * same locks. Also, when we release the lock the list
961 			 * can change out from under us.
962 			 */
963 
964 			/* Find first node */
965 			rw_enter(&port->node_rwlock, RW_READER);
966 			action = 0;
967 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
968 				nlp = port->node_table[i];
969 				while (nlp != NULL) {
970 					if (!nlp->nlp_tag) {
971 						nlp = nlp->nlp_list_next;
972 						continue;
973 					}
974 					nlp->nlp_tag = 0;
975 
976 					/*
977 					 * Check for any device that matches
978 					 * our mask
979 					 */
980 					if ((nlp->nlp_DID & mask) == aff_d_id) {
981 						if (linkdown) {
982 							action = 1;
983 							break;
984 						} else { /* Must be an RSCN */
985 
986 							action = 2;
987 							break;
988 						}
989 					}
990 					nlp = nlp->nlp_list_next;
991 				}
992 
993 				if (action) {
994 					break;
995 				}
996 			}
997 			rw_exit(&port->node_rwlock);
998 
999 
1000 			/* Check if nothing was found */
1001 			if (action == 0) {
1002 				break;
1003 			} else if (action == 1) {
1004 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1005 				    NULL, NULL, NULL);
1006 			} else if (action == 2) {
1007 #ifdef DHCHAP_SUPPORT
1008 				emlxs_dhc_auth_stop(port, nlp);
1009 #endif /* DHCHAP_SUPPORT */
1010 
1011 				/*
1012 				 * Close the node for any further normal IO
1013 				 * A PLOGI will reopen the node
1014 				 */
1015 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1016 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1017 
1018 				/* Flush tx queue */
1019 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1020 
1021 				/* Flush chip queue */
1022 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1023 			}
1024 
1025 		}
1026 
1027 		break;
1028 
1029 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
1030 
1031 		for (;;) {
1032 
1033 			/*
1034 			 * We need to hold the locks this way because
1035 			 * emlxs_mb_unreg_did and the flush routines enter the
1036 			 * same locks. Also, when we release the lock the list
1037 			 * can change out from under us.
1038 			 */
1039 			rw_enter(&port->node_rwlock, RW_READER);
1040 			action = 0;
1041 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1042 				nlp = port->node_table[i];
1043 				while (nlp != NULL) {
1044 					if (!nlp->nlp_tag) {
1045 						nlp = nlp->nlp_list_next;
1046 						continue;
1047 					}
1048 					nlp->nlp_tag = 0;
1049 
1050 					/*
1051 					 * Check for special FCP2 target device
1052 					 * that matches our mask
1053 					 */
1054 					if ((nlp->nlp_fcp_info &
1055 					    NLP_FCP_TGT_DEVICE) &&
1056 					    (nlp->nlp_fcp_info &
1057 					    NLP_FCP_2_DEVICE) &&
1058 					    (nlp->nlp_DID & mask) ==
1059 					    aff_d_id) {
1060 						action = 3;
1061 						break;
1062 					}
1063 
1064 					/*
1065 					 * Check for any other device that
1066 					 * matches our mask
1067 					 */
1068 					else if ((nlp->nlp_DID & mask) ==
1069 					    aff_d_id) {
1070 						if (linkdown) {
1071 							action = 1;
1072 							break;
1073 						} else { /* Must be an RSCN */
1074 
1075 							action = 2;
1076 							break;
1077 						}
1078 					}
1079 
1080 					nlp = nlp->nlp_list_next;
1081 				}
1082 
1083 				if (action) {
1084 					break;
1085 				}
1086 			}
1087 			rw_exit(&port->node_rwlock);
1088 
1089 			/* Check if nothing was found */
1090 			if (action == 0) {
1091 				break;
1092 			} else if (action == 1) {
1093 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1094 				    NULL, NULL, NULL);
1095 			} else if (action == 2) {
1096 #ifdef DHCHAP_SUPPORT
1097 				emlxs_dhc_auth_stop(port, nlp);
1098 #endif /* DHCHAP_SUPPORT */
1099 
1100 				/*
1101 				 * Close the node for any further normal IO
1102 				 * A PLOGI will reopen the node
1103 				 */
1104 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1105 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1106 
1107 				/* Flush tx queue */
1108 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1109 
1110 				/* Flush chip queue */
1111 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1112 
1113 			} else if (action == 3) {	/* FCP2 devices */
1114 				unreg_vpi = 0;
1115 
1116 #ifdef DHCHAP_SUPPORT
1117 				emlxs_dhc_auth_stop(port, nlp);
1118 #endif /* DHCHAP_SUPPORT */
1119 
1120 				/*
1121 				 * Close the node for any further normal IO
1122 				 * An ADISC or a PLOGI will reopen the node
1123 				 */
1124 				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
1125 				emlxs_node_close(port, nlp, FC_IP_RING,
1126 				    ((linkdown) ? 0 : 60));
1127 
1128 				/* Flush tx queues except for FCP ring */
1129 				(void) emlxs_tx_node_flush(port, nlp,
1130 				    &hba->ring[FC_CT_RING], 0, 0);
1131 				(void) emlxs_tx_node_flush(port, nlp,
1132 				    &hba->ring[FC_ELS_RING], 0, 0);
1133 				(void) emlxs_tx_node_flush(port, nlp,
1134 				    &hba->ring[FC_IP_RING], 0, 0);
1135 
1136 				/* Flush chip queues except for FCP ring */
1137 				(void) emlxs_chipq_node_flush(port,
1138 				    &hba->ring[FC_CT_RING], nlp, 0);
1139 				(void) emlxs_chipq_node_flush(port,
1140 				    &hba->ring[FC_ELS_RING], nlp, 0);
1141 				(void) emlxs_chipq_node_flush(port,
1142 				    &hba->ring[FC_IP_RING], nlp, 0);
1143 			}
1144 		}
1145 		break;
1146 
1147 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1148 
1149 		if (!linkdown && !vlinkdown) {
1150 			break;
1151 		}
1152 
1153 		for (;;) {
1154 			/*
1155 			 * We need to hold the locks this way because
1156 			 * emlxs_mb_unreg_did and the flush routines enter the
1157 			 * same locks. Also, when we release the lock the list
1158 			 * can change out from under us.
1159 			 */
1160 			rw_enter(&port->node_rwlock, RW_READER);
1161 			action = 0;
1162 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1163 				nlp = port->node_table[i];
1164 				while (nlp != NULL) {
1165 					if (!nlp->nlp_tag) {
1166 						nlp = nlp->nlp_list_next;
1167 						continue;
1168 					}
1169 					nlp->nlp_tag = 0;
1170 
1171 					/*
1172 					 * Check for FCP target device that
1173 					 * matches our mask
1174 					 */
1175 					if ((nlp->nlp_fcp_info &
1176 					    NLP_FCP_TGT_DEVICE) &&
1177 					    (nlp->nlp_DID & mask) ==
1178 					    aff_d_id) {
1179 						action = 3;
1180 						break;
1181 					}
1182 
1183 					/*
1184 					 * Check for any other device that
1185 					 * matches our mask
1186 					 */
1187 					else if ((nlp->nlp_DID & mask) ==
1188 					    aff_d_id) {
1189 						if (linkdown) {
1190 							action = 1;
1191 							break;
1192 						} else { /* Must be an RSCN */
1193 
1194 							action = 2;
1195 							break;
1196 						}
1197 					}
1198 
1199 					nlp = nlp->nlp_list_next;
1200 				}
1201 				if (action) {
1202 					break;
1203 				}
1204 			}
1205 			rw_exit(&port->node_rwlock);
1206 
1207 			/* Check if nothing was found */
1208 			if (action == 0) {
1209 				break;
1210 			} else if (action == 1) {
1211 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1212 				    NULL, NULL, NULL);
1213 			} else if (action == 2) {
1214 				/*
1215 				 * Close the node for any further normal IO
1216 				 * A PLOGI will reopen the node
1217 				 */
1218 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1219 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1220 
1221 				/* Flush tx queue */
1222 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1223 
1224 				/* Flush chip queue */
1225 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1226 
1227 			} else if (action == 3) {	/* FCP2 devices */
1228 				unreg_vpi = 0;
1229 
1230 				/*
1231 				 * Close the node for any further normal IO
1232 				 * An ADISC or a PLOGI will reopen the node
1233 				 */
1234 				emlxs_node_close(port, nlp, FC_FCP_RING, -1);
1235 				emlxs_node_close(port, nlp, FC_IP_RING,
1236 				    ((linkdown) ? 0 : 60));
1237 
1238 				/* Flush tx queues except for FCP ring */
1239 				(void) emlxs_tx_node_flush(port, nlp,
1240 				    &hba->ring[FC_CT_RING], 0, 0);
1241 				(void) emlxs_tx_node_flush(port, nlp,
1242 				    &hba->ring[FC_ELS_RING], 0, 0);
1243 				(void) emlxs_tx_node_flush(port, nlp,
1244 				    &hba->ring[FC_IP_RING], 0, 0);
1245 
1246 				/* Flush chip queues except for FCP ring */
1247 				(void) emlxs_chipq_node_flush(port,
1248 				    &hba->ring[FC_CT_RING], nlp, 0);
1249 				(void) emlxs_chipq_node_flush(port,
1250 				    &hba->ring[FC_ELS_RING], nlp, 0);
1251 				(void) emlxs_chipq_node_flush(port,
1252 				    &hba->ring[FC_IP_RING], nlp, 0);
1253 			}
1254 		}
1255 
1256 		break;
1257 
1258 	}	/* switch() */
1259 
1260 done:
1261 
1262 	if (unreg_vpi) {
1263 		(void) emlxs_mb_unreg_vpi(port);
1264 	}
1265 
1266 	return (0);
1267 
1268 }  /* emlxs_port_offline() */
1269 
1270 
1271 extern void
1272 emlxs_port_online(emlxs_port_t *vport)
1273 {
1274 	emlxs_hba_t *hba = vport->hba;
1275 	emlxs_port_t *port = &PPORT;
1276 	uint32_t state;
1277 	uint32_t update;
1278 	uint32_t npiv_linkup;
1279 	char topology[32];
1280 	char linkspeed[32];
1281 	char mode[32];
1282 
1283 	/*
1284 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1285 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1286 	 */
1287 
1288 	if ((vport->vpi > 0) &&
1289 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1290 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1291 		return;
1292 	}
1293 
1294 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1295 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1296 		return;
1297 	}
1298 
1299 	mutex_enter(&EMLXS_PORT_LOCK);
1300 
1301 	/* Check for mode */
1302 	if (port->tgt_mode) {
1303 		(void) strcpy(mode, ", target");
1304 	} else if (port->ini_mode) {
1305 		(void) strcpy(mode, ", initiator");
1306 	} else {
1307 		(void) strcpy(mode, "");
1308 	}
1309 
1310 	/* Check for loop topology */
1311 	if (hba->topology == TOPOLOGY_LOOP) {
1312 		state = FC_STATE_LOOP;
1313 		(void) strcpy(topology, ", loop");
1314 	} else {
1315 		state = FC_STATE_ONLINE;
1316 		(void) strcpy(topology, ", fabric");
1317 	}
1318 
1319 	/* Set the link speed */
1320 	switch (hba->linkspeed) {
1321 	case 0:
1322 		(void) strcpy(linkspeed, "Gb");
1323 		state |= FC_STATE_1GBIT_SPEED;
1324 		break;
1325 
1326 	case LA_1GHZ_LINK:
1327 		(void) strcpy(linkspeed, "1Gb");
1328 		state |= FC_STATE_1GBIT_SPEED;
1329 		break;
1330 	case LA_2GHZ_LINK:
1331 		(void) strcpy(linkspeed, "2Gb");
1332 		state |= FC_STATE_2GBIT_SPEED;
1333 		break;
1334 	case LA_4GHZ_LINK:
1335 		(void) strcpy(linkspeed, "4Gb");
1336 		state |= FC_STATE_4GBIT_SPEED;
1337 		break;
1338 	case LA_8GHZ_LINK:
1339 		(void) strcpy(linkspeed, "8Gb");
1340 		state |= FC_STATE_8GBIT_SPEED;
1341 		break;
1342 	case LA_10GHZ_LINK:
1343 		(void) strcpy(linkspeed, "10Gb");
1344 		state |= FC_STATE_10GBIT_SPEED;
1345 		break;
1346 	default:
1347 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1348 		break;
1349 	}
1350 
1351 	npiv_linkup = 0;
1352 	update = 0;
1353 
1354 	if ((hba->state >= FC_LINK_UP) &&
1355 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1356 		update = 1;
1357 		vport->ulp_statec = state;
1358 
1359 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1360 			hba->flag |= FC_NPIV_LINKUP;
1361 			npiv_linkup = 1;
1362 		}
1363 	}
1364 
1365 	mutex_exit(&EMLXS_PORT_LOCK);
1366 
1367 	/*
1368 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1369 	 *    "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x
1370 	 *    statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
1371 	 *    hba->state, vport->ulp_statec);
1372 	 */
1373 	if (update) {
1374 		if (vport->flag & EMLXS_PORT_BOUND) {
1375 			if (vport->vpi == 0) {
1376 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1377 				    "%s%s%s", linkspeed, topology, mode);
1378 			} else if (npiv_linkup) {
1379 				EMLXS_MSGF(EMLXS_CONTEXT,
1380 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1381 				    linkspeed, topology, mode);
1382 			}
1383 
1384 			if (vport->ini_mode) {
1385 				vport->ulp_statec_cb(vport->ulp_handle,
1386 				    state);
1387 			}
1388 #ifdef SFCT_SUPPORT
1389 			else if (vport->tgt_mode) {
1390 				emlxs_fct_link_up(vport);
1391 			}
1392 #endif /* SFCT_SUPPORT */
1393 		} else {
1394 			if (vport->vpi == 0) {
1395 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1396 				    "%s%s%s *", linkspeed, topology, mode);
1397 			} else if (npiv_linkup) {
1398 				EMLXS_MSGF(EMLXS_CONTEXT,
1399 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1400 				    linkspeed, topology, mode);
1401 			}
1402 		}
1403 
1404 		/* Check for waiting threads */
1405 		if (vport->vpi == 0) {
1406 			mutex_enter(&EMLXS_LINKUP_LOCK);
1407 			if (hba->linkup_wait_flag == TRUE) {
1408 				hba->linkup_wait_flag = FALSE;
1409 				cv_broadcast(&EMLXS_LINKUP_CV);
1410 			}
1411 			mutex_exit(&EMLXS_LINKUP_LOCK);
1412 		}
1413 
1414 		/* Flush any pending ub buffers */
1415 		emlxs_ub_flush(vport);
1416 	}
1417 
1418 	return;
1419 
1420 }  /* emlxs_port_online() */
1421 
1422 
1423 extern void
1424 emlxs_linkdown(emlxs_hba_t *hba)
1425 {
1426 	emlxs_port_t *port = &PPORT;
1427 	int i;
1428 
1429 	mutex_enter(&EMLXS_PORT_LOCK);
1430 
1431 	HBASTATS.LinkDown++;
1432 	emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);
1433 
1434 	/* Filter hba flags */
1435 	hba->flag &= FC_LINKDOWN_MASK;
1436 	hba->discovery_timer = 0;
1437 	hba->linkup_timer = 0;
1438 
1439 	mutex_exit(&EMLXS_PORT_LOCK);
1440 
1441 	for (i = 0; i < MAX_VPORTS; i++) {
1442 		port = &VPORT(i);
1443 
1444 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1445 			continue;
1446 		}
1447 
1448 		(void) emlxs_port_offline(port, 0xffffffff);
1449 
1450 	}
1451 
1452 	return;
1453 
1454 }  /* emlxs_linkdown() */
1455 
1456 
1457 extern void
1458 emlxs_linkup(emlxs_hba_t *hba)
1459 {
1460 	emlxs_port_t *port = &PPORT;
1461 	emlxs_config_t *cfg = &CFG;
1462 
1463 	mutex_enter(&EMLXS_PORT_LOCK);
1464 
1465 	HBASTATS.LinkUp++;
1466 	emlxs_ffstate_change_locked(hba, FC_LINK_UP);
1467 
1468 #ifdef MENLO_SUPPORT
1469 	if (hba->flag & FC_MENLO_MODE) {
1470 		mutex_exit(&EMLXS_PORT_LOCK);
1471 
1472 		/*
1473 		 * Trigger linkup CV and don't start linkup & discovery
1474 		 * timers
1475 		 */
1476 		mutex_enter(&EMLXS_LINKUP_LOCK);
1477 		cv_broadcast(&EMLXS_LINKUP_CV);
1478 		mutex_exit(&EMLXS_LINKUP_LOCK);
1479 
1480 		return;
1481 	}
1482 #endif /* MENLO_SUPPORT */
1483 
1484 	/* Set the linkup & discovery timers */
1485 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1486 	hba->discovery_timer =
1487 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1488 	    cfg[CFG_DISC_TIMEOUT].current;
1489 
1490 	mutex_exit(&EMLXS_PORT_LOCK);
1491 
1492 	return;
1493 
1494 }  /* emlxs_linkup() */
1495 
1496 
1497 /*
1498  *  emlxs_reset_link
1499  *
1500  *  Description:
1501  *  Called to reset the link with an init_link
1502  *
1503  *    Returns: 0 on success; 1 if a mailbox buffer cannot be allocated
1504  *
1505  */
1506 extern int
1507 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
1508 {
1509 	emlxs_port_t *port = &PPORT;
1510 	emlxs_config_t *cfg;
1511 	MAILBOX *mb;
1512 
1513 	/*
1514 	 * Get a buffer to use for the mailbox command
1515 	 */
1516 	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
1517 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1518 		    "Unable to allocate mailbox buffer.");
1519 
1520 		return (1);
1521 	}
1522 
1523 	cfg = &CFG;
1524 
1525 	if (linkup) {
1526 		/*
1527 		 * Setup and issue mailbox INITIALIZE LINK command
1528 		 */
1529 
1530 		emlxs_mb_init_link(hba,
1531 		    (MAILBOX *) mb,
1532 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1533 
1534 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1535 
1536 		/* Clear the loopback mode */
1537 		mutex_enter(&EMLXS_PORT_LOCK);
1538 		hba->flag &= ~FC_LOOPBACK_MODE;
1539 		hba->loopback_tics = 0;
1540 		mutex_exit(&EMLXS_PORT_LOCK);
1541 
1542 		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
1543 		    0) != MBX_BUSY) {
1544 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1545 		}
1546 
1547 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1548 
1549 	} else {	/* hold link down */
1550 
1551 		emlxs_mb_down_link(hba, (MAILBOX *)mb);
1552 
1553 		if (emlxs_sli_issue_mbox_cmd(hba, (MAILBOX *)mb, MBX_NOWAIT,
1554 		    0) != MBX_BUSY) {
1555 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1556 		}
1557 
1558 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1559 		    "Disabling link...");
1560 	}
1561 
1562 	return (0);
1563 
1564 }  /* emlxs_reset_link() */
1565 
1566 
1567 extern int
1568 emlxs_online(emlxs_hba_t *hba)
1569 {
1570 	emlxs_port_t *port = &PPORT;
1571 	int32_t rval = 0;
1572 	uint32_t i = 0;
1573 
1574 	/* Make sure adapter is offline or exit trying (30 seconds) */
1575 	while (i++ < 30) {
1576 		/* Check if adapter is already going online */
1577 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1578 			return (0);
1579 		}
1580 
1581 		mutex_enter(&EMLXS_PORT_LOCK);
1582 
1583 		/* Check again */
1584 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1585 			mutex_exit(&EMLXS_PORT_LOCK);
1586 			return (0);
1587 		}
1588 
1589 		/* Check if adapter is offline */
1590 		if (hba->flag & FC_OFFLINE_MODE) {
1591 			/* Mark it going online */
1592 			hba->flag &= ~FC_OFFLINE_MODE;
1593 			hba->flag |= FC_ONLINING_MODE;
1594 
1595 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1596 			mutex_exit(&EMLXS_PORT_LOCK);
1597 			break;
1598 		}
1599 
1600 		mutex_exit(&EMLXS_PORT_LOCK);
1601 
1602 		DELAYMS(1000);
1603 	}
1604 
1605 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1606 	    "Going online...");
1607 
1608 	if ((rval = emlxs_ffinit(hba))) {
1609 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1610 		    rval);
1611 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1612 
1613 		/* Set FC_OFFLINE_MODE */
1614 		mutex_enter(&EMLXS_PORT_LOCK);
1615 		emlxs_diag_state = DDI_OFFDI;
1616 		hba->flag |= FC_OFFLINE_MODE;
1617 		hba->flag &= ~FC_ONLINING_MODE;
1618 		mutex_exit(&EMLXS_PORT_LOCK);
1619 
1620 		return (rval);
1621 	}
1622 
1623 	/* Start the timer */
1624 	emlxs_timer_start(hba);
1625 
1626 	/* Set FC_ONLINE_MODE */
1627 	mutex_enter(&EMLXS_PORT_LOCK);
1628 	emlxs_diag_state = DDI_ONDI;
1629 	hba->flag |= FC_ONLINE_MODE;
1630 	hba->flag &= ~FC_ONLINING_MODE;
1631 	mutex_exit(&EMLXS_PORT_LOCK);
1632 
1633 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1634 
1635 #ifdef SFCT_SUPPORT
1636 	(void) emlxs_fct_port_initialize(port);
1637 #endif /* SFCT_SUPPORT */
1638 
1639 	return (rval);
1640 
1641 }  /* emlxs_online() */
1642 
1643 
1644 extern int
1645 emlxs_offline(emlxs_hba_t *hba)
1646 {
1647 	emlxs_port_t *port = &PPORT;
1648 	uint32_t i = 0;
1649 	int rval = 1;
1650 
1651 	/* Make sure adapter is online or exit trying (30 seconds) */
1652 	while (i++ < 30) {
1653 		/* Check if adapter is already going offline */
1654 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1655 			return (0);
1656 		}
1657 
1658 		mutex_enter(&EMLXS_PORT_LOCK);
1659 
1660 		/* Check again */
1661 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1662 			mutex_exit(&EMLXS_PORT_LOCK);
1663 			return (0);
1664 		}
1665 
1666 		/* Check if adapter is online */
1667 		if (hba->flag & FC_ONLINE_MODE) {
1668 			/* Mark it going offline */
1669 			hba->flag &= ~FC_ONLINE_MODE;
1670 			hba->flag |= FC_OFFLINING_MODE;
1671 
1672 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1673 			mutex_exit(&EMLXS_PORT_LOCK);
1674 			break;
1675 		}
1676 
1677 		mutex_exit(&EMLXS_PORT_LOCK);
1678 
1679 		DELAYMS(1000);
1680 	}
1681 
1682 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1683 	    "Going offline...");
1684 
1685 	if (port->ini_mode) {
1686 		/* Flush all IO */
1687 		emlxs_linkdown(hba);
1688 
1689 	}
1690 #ifdef SFCT_SUPPORT
1691 	else {
1692 		(void) emlxs_fct_port_shutdown(port);
1693 	}
1694 #endif /* SFCT_SUPPORT */
1695 
1696 	/* Check if adapter was shutdown */
1697 	if (hba->flag & FC_HARDWARE_ERROR) {
1698 		/*
1699 		 * Force mailbox cleanup
1700 		 * This will wake any sleeping or polling threads
1701 		 */
1702 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1703 	}
1704 
1705 	/* Pause here for the IO to settle */
1706 	delay(drv_usectohz(1000000));	/* 1 sec */
1707 
1708 	/* Unregister all nodes */
1709 	emlxs_ffcleanup(hba);
1710 
1711 
1712 	if (hba->bus_type == SBUS_FC) {
1713 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba, hba->sbus_csr_addr),
1714 		    0x9A);
1715 #ifdef FMA_SUPPORT
1716 		if (emlxs_fm_check_acc_handle(hba, hba->sbus_csr_handle)
1717 		    != DDI_FM_OK) {
1718 			EMLXS_MSGF(EMLXS_CONTEXT,
1719 			    &emlxs_invalid_access_handle_msg, NULL);
1720 		}
1721 #endif  /* FMA_SUPPORT */
1722 	}
1723 
1724 	/* Stop the timer */
1725 	emlxs_timer_stop(hba);
1726 
1727 	/* For safety flush every iotag list */
1728 	if (emlxs_iotag_flush(hba)) {
1729 		/* Pause here for the IO to flush */
1730 		delay(drv_usectohz(1000));
1731 	}
1732 
1733 	/* Wait for poll command request to settle */
1734 	while (hba->io_poll_count > 0) {
1735 		delay(drv_usectohz(2000000));   /* 2 sec */
1736 	}
1737 
1738 	emlxs_sli_offline(hba);
1739 
1740 	/* Free all the shared memory */
1741 	(void) emlxs_mem_free_buffer(hba);
1742 
1743 	mutex_enter(&EMLXS_PORT_LOCK);
1744 	hba->flag |= FC_OFFLINE_MODE;
1745 	hba->flag &= ~FC_OFFLINING_MODE;
1746 	emlxs_diag_state = DDI_OFFDI;
1747 	mutex_exit(&EMLXS_PORT_LOCK);
1748 
1749 	rval = 0;
1750 
1751 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1752 
1753 done:
1754 
1755 	return (rval);
1756 
1757 }  /* emlxs_offline() */
1758 
1759 
1760 
1761 extern int
1762 emlxs_power_down(emlxs_hba_t *hba)
1763 {
1764 #ifdef FMA_SUPPORT
1765 	emlxs_port_t *port = &PPORT;
1766 #endif  /* FMA_SUPPORT */
1767 	int32_t rval = 0;
1768 	uint32_t *ptr;
1769 	uint32_t i;
1770 
1771 	if ((rval = emlxs_offline(hba))) {
1772 		return (rval);
1773 	}
1774 
1775 	/* Save pci config space */
1776 	ptr = (uint32_t *)hba->pm_config;
1777 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1778 		*ptr =
1779 		    ddi_get32(hba->pci_acc_handle,
1780 		    (uint32_t *)(hba->pci_addr + i));
1781 	}
1782 
1783 	/* Put chip in D3 state */
1784 	(void) ddi_put8(hba->pci_acc_handle,
1785 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1786 	    (uint8_t)PCI_PM_D3_STATE);
1787 
1788 #ifdef FMA_SUPPORT
1789 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1790 	    != DDI_FM_OK) {
1791 		EMLXS_MSGF(EMLXS_CONTEXT,
1792 		    &emlxs_invalid_access_handle_msg, NULL);
1793 		return (1);
1794 	}
1795 #endif  /* FMA_SUPPORT */
1796 
1797 	return (0);
1798 
1799 }  /* End emlxs_power_down */
1800 
1801 
1802 extern int
1803 emlxs_power_up(emlxs_hba_t *hba)
1804 {
1805 #ifdef FMA_SUPPORT
1806 	emlxs_port_t *port = &PPORT;
1807 #endif  /* FMA_SUPPORT */
1808 	int32_t rval = 0;
1809 	uint32_t *ptr;
1810 	uint32_t i;
1811 
1812 
1813 	/* Take chip out of D3 state */
1814 	(void) ddi_put8(hba->pci_acc_handle,
1815 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1816 	    (uint8_t)PCI_PM_D0_STATE);
1817 
1818 	/* At least a 10 ms delay is required here; 100 ms adds margin */
1819 	DELAYMS(100);
1820 
1821 	/* Restore pci config space */
1822 	ptr = (uint32_t *)hba->pm_config;
1823 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1824 		(void) ddi_put32(hba->pci_acc_handle,
1825 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1826 	}
1827 
1828 #ifdef FMA_SUPPORT
1829 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1830 	    != DDI_FM_OK) {
1831 		EMLXS_MSGF(EMLXS_CONTEXT,
1832 		    &emlxs_invalid_access_handle_msg, NULL);
1833 		return (1);
1834 	}
1835 #endif  /* FMA_SUPPORT */
1836 
1837 	/* Bring adapter online */
1838 	if ((rval = emlxs_online(hba))) {
1839 		(void) ddi_put8(hba->pci_acc_handle,
1840 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1841 		    (uint8_t)PCI_PM_D3_STATE);
1842 
1843 		return (rval);
1844 	}
1845 
1846 	return (rval);
1847 
1848 }  /* End emlxs_power_up */
1849 
1850 
1851 /*
1852  *
1853  * NAME:     emlxs_ffcleanup
1854  *
1855  * FUNCTION: Clean up all the Firefly resources used in configuring the adapter
1856  *
1857  * EXECUTION ENVIRONMENT: process only
1858  *
1859  * CALLED FROM: CFG_TERM
1860  *
1861  * INPUT: hba       - pointer to the dev_ctl area.
1862  *
1863  * RETURNS: none
1864  */
1865 extern void
1866 emlxs_ffcleanup(emlxs_hba_t *hba)
1867 {
1868 	emlxs_port_t *port = &PPORT;
1869 	uint32_t i;
1870 
1871 	/* Disable all but the mailbox interrupt */
1872 	emlxs_disable_intr(hba, HC_MBINT_ENA);
1873 
1874 	/* Make sure all port nodes are destroyed */
1875 	for (i = 0; i < MAX_VPORTS; i++) {
1876 		port = &VPORT(i);
1877 
1878 		if (port->node_count) {
1879 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1880 		}
1881 	}
1882 
1883 	/* Clear all interrupt enable conditions */
1884 	emlxs_disable_intr(hba, 0);
1885 
1886 	return;
1887 
1888 }  /* emlxs_ffcleanup() */
1889 
1890 
1891 extern uint16_t
1892 emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp)
1893 {
1894 	emlxs_hba_t *hba;
1895 	emlxs_port_t *port;
1896 	uint16_t iotag;
1897 	uint32_t i;
1898 
1899 	hba = rp->hba;
1900 
1901 	mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno));
1902 
1903 	if (sbp->iotag != 0) {
1904 		port = &PPORT;
1905 
1906 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1907 		    "Pkt already registered! ring=%p iotag=%d sbp=%p",
1908 		    sbp->ring, sbp->iotag, sbp);
1909 	}
1910 
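	/*
	 * Circular search of fc_table for a free (or stale) slot; iotag 0
	 * is reserved and is returned if no slot is available.
	 */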
1911 	iotag = 0;
1912 	for (i = 0; i < rp->max_iotag; i++) {
1913 		if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) {
1914 			rp->fc_iotag = 1;
1915 		}
1916 		iotag = rp->fc_iotag++;
1917 
1918 		if (rp->fc_table[iotag] == 0 ||
1919 		    rp->fc_table[iotag] == STALE_PACKET) {
1920 			hba->io_count[rp->ringno]++;
1921 			rp->fc_table[iotag] = sbp;
1922 
1923 			sbp->iotag = iotag;
1924 			sbp->ring = rp;
1925 
1926 			break;
1927 		}
1928 		iotag = 0;
1929 	}
1930 
1931 	mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
1932 
1933 	/*
1934 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1935 	 *    "emlxs_register_pkt: ringno=%d iotag=%d sbp=%p",
1936 	 *    rp->ringno, iotag, sbp);
1937 	 */
1938 
1939 	return (iotag);
1940 
1941 }  /* emlxs_register_pkt() */
1942 
1943 
1944 
1945 extern emlxs_buf_t *
1946 emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced)
1947 {
1948 	emlxs_hba_t *hba;
1949 	emlxs_buf_t *sbp;
1950 	uint32_t ringno;
1951 
1952 	/* Check the iotag range */
1953 	if ((iotag == 0) || (iotag >= rp->max_iotag)) {
1954 		return (NULL);
1955 	}
1956 
1957 	sbp = NULL;
1958 	hba = rp->hba;
1959 	ringno = rp->ringno;
1960 
1961 	/* Remove the sbp from the table */
1962 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
1963 	sbp = rp->fc_table[iotag];
1964 
1965 	if (!sbp || (sbp == STALE_PACKET)) {
1966 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1967 		return (sbp);
1968 	}
1969 
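	/*
	 * A forced unregister leaves a STALE_PACKET marker in the slot
	 * instead of NULL; the slot remains reusable by emlxs_register_pkt().
	 */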
1970 	rp->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1971 	hba->io_count[ringno]--;
1972 	sbp->iotag = 0;
1973 
1974 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1975 
1976 
1977 	/* Clean up the sbp */
1978 	mutex_enter(&sbp->mtx);
1979 
1980 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
1981 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
1982 		hba->ring_tx_count[ringno]--;
1983 	}
1984 
1985 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
1986 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
1987 	}
1988 
1989 	if (sbp->bmp) {
1990 		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
1991 		sbp->bmp = 0;
1992 	}
1993 
1994 	mutex_exit(&sbp->mtx);
1995 
1996 	return (sbp);
1997 
1998 }  /* emlxs_unregister_pkt() */
1999 
2000 
2001 
2002 /* Flush all IO's to all nodes for a given ring */
2003 extern uint32_t
2004 emlxs_tx_ring_flush(emlxs_hba_t *hba, RING *rp, emlxs_buf_t *fpkt)
2005 {
2006 	emlxs_port_t *port = &PPORT;
2007 	emlxs_buf_t *sbp;
2008 	IOCBQ *iocbq;
2009 	IOCBQ *next;
2010 	IOCB *iocb;
2011 	uint32_t ringno;
2012 	Q abort;
2013 	NODELIST *ndlp;
2014 	IOCB *icmd;
2015 	MATCHMAP *mp;
2016 	uint32_t i;
2017 
2018 	ringno = rp->ringno;
2019 	bzero((void *)&abort, sizeof (Q));
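	/*
	 * IOCBs are first moved to the local 'abort' queue under
	 * EMLXS_RINGTX_LOCK, then completed after the lock is dropped.
	 */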
2020 
2021 	mutex_enter(&EMLXS_RINGTX_LOCK);
2022 
2023 	/* While a node needs servicing */
2024 	while (rp->nodeq.q_first) {
2025 		ndlp = (NODELIST *) rp->nodeq.q_first;
2026 
2027 		/* Check if priority queue is not empty */
2028 		if (ndlp->nlp_ptx[ringno].q_first) {
2029 			/* Transfer all iocb's to local queue */
2030 			if (abort.q_first == 0) {
2031 				abort.q_first = ndlp->nlp_ptx[ringno].q_first;
2032 			} else {
2033 				((IOCBQ *)abort.q_last)->next =
2034 				    (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
2035 			}
2036 
2037 			abort.q_last = ndlp->nlp_ptx[ringno].q_last;
2038 			abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2039 		}
2040 
2041 		/* Check if tx queue is not empty */
2042 		if (ndlp->nlp_tx[ringno].q_first) {
2043 			/* Transfer all iocb's to local queue */
2044 			if (abort.q_first == 0) {
2045 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2046 			} else {
2047 				((IOCBQ *)abort.q_last)->next =
2048 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2049 			}
2050 
2051 			abort.q_last = ndlp->nlp_tx[ringno].q_last;
2052 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2053 
2054 		}
2055 
2056 		/* Clear the queue pointers */
2057 		ndlp->nlp_ptx[ringno].q_first = NULL;
2058 		ndlp->nlp_ptx[ringno].q_last = NULL;
2059 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2060 
2061 		ndlp->nlp_tx[ringno].q_first = NULL;
2062 		ndlp->nlp_tx[ringno].q_last = NULL;
2063 		ndlp->nlp_tx[ringno].q_cnt = 0;
2064 
2065 		/* Remove node from service queue */
2066 
2067 		/* If this is the last node on list */
2068 		if (rp->nodeq.q_last == (void *)ndlp) {
2069 			rp->nodeq.q_last = NULL;
2070 			rp->nodeq.q_first = NULL;
2071 			rp->nodeq.q_cnt = 0;
2072 		} else {
2073 			/* Remove node from head */
2074 			rp->nodeq.q_first = ndlp->nlp_next[ringno];
2075 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
2076 			    rp->nodeq.q_first;
2077 			rp->nodeq.q_cnt--;
2078 		}
2079 
2080 		/* Clear node */
2081 		ndlp->nlp_next[ringno] = NULL;
2082 	}
2083 
2084 	/* First cleanup the iocb's while still holding the lock */
2085 	iocbq = (IOCBQ *) abort.q_first;
2086 	while (iocbq) {
2087 		/* Free the IoTag and the bmp */
2088 		iocb = &iocbq->iocb;
2089 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2090 
2091 		if (sbp && (sbp != STALE_PACKET)) {
2092 			mutex_enter(&sbp->mtx);
2093 
2094 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2095 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2096 				hba->ring_tx_count[ringno]--;
2097 			}
2098 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2099 
2100 			/*
2101 			 * If the fpkt is already set, then we will leave it
2102 			 * alone. This ensures that this pkt is only accounted
2103 			 * for on one fpkt->flush_count
2104 			 */
2105 			if (!sbp->fpkt && fpkt) {
2106 				mutex_enter(&fpkt->mtx);
2107 				sbp->fpkt = fpkt;
2108 				fpkt->flush_count++;
2109 				mutex_exit(&fpkt->mtx);
2110 			}
2111 
2112 			mutex_exit(&sbp->mtx);
2113 		}
2114 
2115 		iocbq = (IOCBQ *)iocbq->next;
2116 
2117 	}	/* end of while */
2118 
2119 	mutex_exit(&EMLXS_RINGTX_LOCK);
2120 
2121 	/* Now abort the iocb's */
2122 	iocbq = (IOCBQ *)abort.q_first;
2123 	while (iocbq) {
2124 		/* Save the next iocbq for now */
2125 		next = (IOCBQ *)iocbq->next;
2126 
2127 		/* Unlink this iocbq */
2128 		iocbq->next = NULL;
2129 
2130 		/* Get the pkt */
2131 		sbp = (emlxs_buf_t *)iocbq->sbp;
2132 
2133 		if (sbp) {
2134 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2135 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2136 
2137 			if (hba->state >= FC_LINK_UP) {
2138 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2139 				    IOERR_ABORT_REQUESTED, 1);
2140 			} else {
2141 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2142 				    IOERR_LINK_DOWN, 1);
2143 			}
2144 
2145 		}
2146 		/* Free the iocb and its associated buffers */
2147 		else {
2148 			icmd = &iocbq->iocb;
2149 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2150 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2151 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2152 				if ((hba->flag &
2153 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2154 					/* HBA is detaching or offlining */
2155 					if (icmd->ulpCommand !=
2156 					    CMD_QUE_RING_LIST64_CN) {
2157 						uint8_t	*tmp;
2158 
2159 						for (i = 0;
2160 						    i < icmd->ulpBdeCount;
2161 						    i++) {
2162 							mp = EMLXS_GET_VADDR(
2163 							    hba, rp, icmd);
2164 
2165 							tmp = (uint8_t *)mp;
2166 							if (mp) {
2167 							(void) emlxs_mem_put(
2168 							    hba, MEM_BUF, tmp);
2169 							}
2170 						}
2171 					}
2172 
2173 					(void) emlxs_mem_put(hba, MEM_IOCB,
2174 					    (uint8_t *)iocbq);
2175 				} else {
2176 					/* repost the unsolicited buffer */
2177 					emlxs_sli_issue_iocb_cmd(hba, rp,
2178 					    iocbq);
2179 				}
2180 			}
2181 		}
2182 
2183 		iocbq = next;
2184 
2185 	}	/* end of while */
2186 
2187 	return (abort.q_cnt);
2188 
2189 }  /* emlxs_tx_ring_flush() */
2190 
2191 
2192 /* Flush all IO's on all or a given ring for a given node */
2193 extern uint32_t
2194 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, RING *ring,
2195     uint32_t shutdown, emlxs_buf_t *fpkt)
2196 {
2197 	emlxs_hba_t *hba = HBA;
2198 	emlxs_buf_t *sbp;
2199 	uint32_t ringno;
2200 	RING *rp;
2201 	IOCB *icmd;
2202 	IOCBQ *iocbq;
2203 	NODELIST *prev;
2204 	IOCBQ *next;
2205 	IOCB *iocb;
2206 	Q abort;
2207 	uint32_t i;
2208 	MATCHMAP *mp;
2209 
2210 
2211 	bzero((void *)&abort, sizeof (Q));
2212 
2213 	/* Flush all I/O's on tx queue to this target */
2214 	mutex_enter(&EMLXS_RINGTX_LOCK);
2215 
2216 	if (!ndlp->nlp_base && shutdown) {
2217 		ndlp->nlp_active = 0;
2218 	}
2219 
2220 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2221 		rp = &hba->ring[ringno];
2222 
2223 		if (ring && rp != ring) {
2224 			continue;
2225 		}
2226 
2227 		if (!ndlp->nlp_base || shutdown) {
2228 			/* Check if priority queue is not empty */
2229 			if (ndlp->nlp_ptx[ringno].q_first) {
2230 				/* Transfer all iocb's to local queue */
2231 				if (abort.q_first == 0) {
2232 					abort.q_first =
2233 					    ndlp->nlp_ptx[ringno].q_first;
2234 				} else {
2235 					((IOCBQ *)abort.q_last)->next =
2236 					    (IOCBQ *)ndlp->nlp_ptx[ringno].
2237 					    q_first;
2238 				}
2239 
2240 				abort.q_last = ndlp->nlp_ptx[ringno].q_last;
2241 				abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2242 			}
2243 		}
2244 
2245 		/* Check if tx queue is not empty */
2246 		if (ndlp->nlp_tx[ringno].q_first) {
2247 			/* Transfer all iocb's to local queue */
2248 			if (abort.q_first == 0) {
2249 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2250 			} else {
2251 				((IOCBQ *)abort.q_last)->next =
2252 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2253 			}
2254 
2255 			abort.q_last = ndlp->nlp_tx[ringno].q_last;
2256 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2257 		}
2258 
2259 		/* Clear the queue pointers */
2260 		ndlp->nlp_ptx[ringno].q_first = NULL;
2261 		ndlp->nlp_ptx[ringno].q_last = NULL;
2262 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2263 
2264 		ndlp->nlp_tx[ringno].q_first = NULL;
2265 		ndlp->nlp_tx[ringno].q_last = NULL;
2266 		ndlp->nlp_tx[ringno].q_cnt = 0;
2267 
2268 		/* If this node was on the ring queue, remove it */
2269 		if (ndlp->nlp_next[ringno]) {
2270 			/* If this is the only node on list */
2271 			if (rp->nodeq.q_first == (void *)ndlp &&
2272 			    rp->nodeq.q_last == (void *)ndlp) {
2273 				rp->nodeq.q_last = NULL;
2274 				rp->nodeq.q_first = NULL;
2275 				rp->nodeq.q_cnt = 0;
2276 			} else if (rp->nodeq.q_first == (void *)ndlp) {
2277 				rp->nodeq.q_first = ndlp->nlp_next[ringno];
2278 				((NODELIST *) rp->nodeq.q_last)->
2279 				    nlp_next[ringno] = rp->nodeq.q_first;
2280 				rp->nodeq.q_cnt--;
2281 			} else {
2282 				/*
				 * This is a little more difficult. Find the
				 * previous node in the circular ring queue.
2285 				 */
2286 				prev = ndlp;
2287 				while (prev->nlp_next[ringno] != ndlp) {
2288 					prev = prev->nlp_next[ringno];
2289 				}
2290 
2291 				prev->nlp_next[ringno] =
2292 				    ndlp->nlp_next[ringno];
2293 
2294 				if (rp->nodeq.q_last == (void *)ndlp) {
2295 					rp->nodeq.q_last = (void *)prev;
2296 				}
2297 				rp->nodeq.q_cnt--;
2298 
2299 			}
2300 
2301 			/* Clear node */
2302 			ndlp->nlp_next[ringno] = NULL;
2303 		}
2304 
2305 	}
2306 
2307 	/* First cleanup the iocb's while still holding the lock */
2308 	iocbq = (IOCBQ *) abort.q_first;
2309 	while (iocbq) {
2310 		/* Free the IoTag and the bmp */
2311 		iocb = &iocbq->iocb;
2312 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2313 
2314 		if (sbp && (sbp != STALE_PACKET)) {
2315 			mutex_enter(&sbp->mtx);
2316 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2317 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				/* Note: 'ring' may be NULL here */
				hba->ring_tx_count[((RING *)
				    iocbq->ring)->ringno]--;
2319 			}
2320 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2321 
2322 			/*
2323 			 * If the fpkt is already set, then we will leave it
2324 			 * alone. This ensures that this pkt is only accounted
2325 			 * for on one fpkt->flush_count
2326 			 */
2327 			if (!sbp->fpkt && fpkt) {
2328 				mutex_enter(&fpkt->mtx);
2329 				sbp->fpkt = fpkt;
2330 				fpkt->flush_count++;
2331 				mutex_exit(&fpkt->mtx);
2332 			}
2333 
2334 			mutex_exit(&sbp->mtx);
2335 		}
2336 
2337 		iocbq = (IOCBQ *) iocbq->next;
2338 
2339 	}	/* end of while */
2340 
2341 	mutex_exit(&EMLXS_RINGTX_LOCK);
2342 
2343 	/* Now abort the iocb's outside the locks */
2344 	iocbq = (IOCBQ *)abort.q_first;
2345 	while (iocbq) {
2346 		/* Save the next iocbq for now */
2347 		next = (IOCBQ *)iocbq->next;
2348 
2349 		/* Unlink this iocbq */
2350 		iocbq->next = NULL;
2351 
2352 		/* Get the pkt */
2353 		sbp = (emlxs_buf_t *)iocbq->sbp;
2354 
2355 		if (sbp) {
2356 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2357 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2358 
2359 			if (hba->state >= FC_LINK_UP) {
2360 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2361 				    IOERR_ABORT_REQUESTED, 1);
2362 			} else {
2363 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2364 				    IOERR_LINK_DOWN, 1);
2365 			}
2366 
2367 		}
2368 		/* Free the iocb and its associated buffers */
2369 		else {
2370 			icmd = &iocbq->iocb;
2371 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2372 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2373 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2374 				if ((hba->flag &
2375 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2376 					/* HBA is detaching or offlining */
2377 					if (icmd->ulpCommand !=
2378 					    CMD_QUE_RING_LIST64_CN) {
2379 						uint8_t	*tmp;
2380 
2381 						for (i = 0;
2382 						    i < icmd->ulpBdeCount;
2383 						    i++) {
2384 							mp = EMLXS_GET_VADDR(
2385 							    hba, rp, icmd);
2386 
2387 							tmp = (uint8_t *)mp;
2388 							if (mp) {
2389 							(void) emlxs_mem_put(
2390 							    hba, MEM_BUF, tmp);
2391 							}
2392 						}
2393 					}
2394 
2395 					(void) emlxs_mem_put(hba, MEM_IOCB,
2396 					    (uint8_t *)iocbq);
2397 				} else {
2398 					/* repost the unsolicited buffer */
2399 					emlxs_sli_issue_iocb_cmd(hba, rp,
2400 					    iocbq);
2401 				}
2402 			}
2403 		}
2404 
2405 		iocbq = next;
2406 
2407 	}	/* end of while */
2408 
2409 	return (abort.q_cnt);
2410 
2411 }  /* emlxs_tx_node_flush() */
2412 
2413 
2414 /* Check for IO's on all or a given ring for a given node */
2415 extern uint32_t
2416 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, RING *ring)
2417 {
2418 	emlxs_hba_t *hba = HBA;
2419 	uint32_t ringno;
2420 	RING *rp;
2421 	uint32_t count;
2422 
2423 	count = 0;
2424 
	/* Count the I/O's on the tx queues for this target */
2426 	mutex_enter(&EMLXS_RINGTX_LOCK);
2427 
2428 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2429 		rp = &hba->ring[ringno];
2430 
2431 		if (ring && rp != ring) {
2432 			continue;
2433 		}
2434 
2435 		/* Check if priority queue is not empty */
2436 		if (ndlp->nlp_ptx[ringno].q_first) {
2437 			count += ndlp->nlp_ptx[ringno].q_cnt;
2438 		}
2439 
2440 		/* Check if tx queue is not empty */
2441 		if (ndlp->nlp_tx[ringno].q_first) {
2442 			count += ndlp->nlp_tx[ringno].q_cnt;
2443 		}
2444 
2445 	}
2446 
2447 	mutex_exit(&EMLXS_RINGTX_LOCK);
2448 
2449 	return (count);
2450 
2451 }  /* emlxs_tx_node_check() */
2452 
2453 
2454 
2455 /* Flush all IO's on the FCP ring for a given node's lun */
2456 extern uint32_t
2457 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2458     emlxs_buf_t *fpkt)
2459 {
2460 	emlxs_hba_t *hba = HBA;
2461 	emlxs_buf_t *sbp;
2462 	uint32_t ringno;
2463 	IOCBQ *iocbq;
2464 	IOCBQ *prev;
2465 	IOCBQ *next;
2466 	IOCB *iocb;
2467 	IOCB *icmd;
2468 	Q abort;
2469 	uint32_t i;
2470 	MATCHMAP *mp;
2471 	RING *rp;
2472 
2473 	ringno = FC_FCP_RING;
2474 	rp = &hba->ring[ringno];
2475 
2476 	bzero((void *)&abort, sizeof (Q));
2477 
2478 	/* Flush I/O's on txQ to this target's lun */
2479 	mutex_enter(&EMLXS_RINGTX_LOCK);
2480 
2481 	/* Scan the priority queue first */
2482 	prev = NULL;
2483 	iocbq = (IOCBQ *) ndlp->nlp_ptx[ringno].q_first;
2484 
2485 	while (iocbq) {
2486 		next = (IOCBQ *)iocbq->next;
2487 		iocb = &iocbq->iocb;
2488 		sbp = (emlxs_buf_t *)iocbq->sbp;
2489 
2490 		/* Check if this IO is for our lun */
		if (sbp && (sbp->lun == lun)) {
			/* Remove iocb from the node's priority tx queue */
2493 			if (next == 0) {
2494 				ndlp->nlp_ptx[ringno].q_last =
2495 				    (uint8_t *)prev;
2496 			}
2497 
2498 			if (prev == 0) {
2499 				ndlp->nlp_ptx[ringno].q_first =
2500 				    (uint8_t *)next;
2501 			} else {
2502 				prev->next = next;
2503 			}
2504 
2505 			iocbq->next = NULL;
2506 			ndlp->nlp_ptx[ringno].q_cnt--;
2507 
2508 			/*
2509 			 * Add this iocb to our local abort Q
2510 			 * This way we don't hold the RINGTX lock too long
2511 			 */
2512 			if (abort.q_first) {
2513 				((IOCBQ *)abort.q_last)->next = iocbq;
2514 				abort.q_last = (uint8_t *)iocbq;
2515 				abort.q_cnt++;
2516 			} else {
2517 				abort.q_first = (uint8_t *)iocbq;
2518 				abort.q_last = (uint8_t *)iocbq;
2519 				abort.q_cnt = 1;
2520 			}
2521 			iocbq->next = NULL;
2522 		} else {
2523 			prev = iocbq;
2524 		}
2525 
2526 		iocbq = next;
2527 
2528 	}	/* while (iocbq) */
2529 
2530 
2531 	/* Scan the regular queue */
2532 	prev = NULL;
2533 	iocbq = (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2534 
2535 	while (iocbq) {
2536 		next = (IOCBQ *)iocbq->next;
2537 		iocb = &iocbq->iocb;
2538 		sbp = (emlxs_buf_t *)iocbq->sbp;
2539 
2540 		/* Check if this IO is for our lun */
		if (sbp && (sbp->lun == lun)) {
2542 			/* Remove iocb from the node's tx queue */
2543 			if (next == 0) {
2544 				ndlp->nlp_tx[ringno].q_last =
2545 				    (uint8_t *)prev;
2546 			}
2547 
2548 			if (prev == 0) {
2549 				ndlp->nlp_tx[ringno].q_first =
2550 				    (uint8_t *)next;
2551 			} else {
2552 				prev->next = next;
2553 			}
2554 
2555 			iocbq->next = NULL;
2556 			ndlp->nlp_tx[ringno].q_cnt--;
2557 
2558 			/*
2559 			 * Add this iocb to our local abort Q
2560 			 * This way we don't hold the RINGTX lock too long
2561 			 */
2562 			if (abort.q_first) {
2563 				((IOCBQ *) abort.q_last)->next = iocbq;
2564 				abort.q_last = (uint8_t *)iocbq;
2565 				abort.q_cnt++;
2566 			} else {
2567 				abort.q_first = (uint8_t *)iocbq;
2568 				abort.q_last = (uint8_t *)iocbq;
2569 				abort.q_cnt = 1;
2570 			}
2571 			iocbq->next = NULL;
2572 		} else {
2573 			prev = iocbq;
2574 		}
2575 
2576 		iocbq = next;
2577 
2578 	}	/* while (iocbq) */
2579 
2580 	/* First cleanup the iocb's while still holding the lock */
2581 	iocbq = (IOCBQ *)abort.q_first;
2582 	while (iocbq) {
2583 		/* Free the IoTag and the bmp */
2584 		iocb = &iocbq->iocb;
2585 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2586 
2587 		if (sbp && (sbp != STALE_PACKET)) {
2588 			mutex_enter(&sbp->mtx);
2589 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2590 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2591 				hba->ring_tx_count[ringno]--;
2592 			}
2593 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2594 
2595 			/*
2596 			 * If the fpkt is already set, then we will leave it
2597 			 * alone. This ensures that this pkt is only accounted
2598 			 * for on one fpkt->flush_count
2599 			 */
2600 			if (!sbp->fpkt && fpkt) {
2601 				mutex_enter(&fpkt->mtx);
2602 				sbp->fpkt = fpkt;
2603 				fpkt->flush_count++;
2604 				mutex_exit(&fpkt->mtx);
2605 			}
2606 
2607 			mutex_exit(&sbp->mtx);
2608 		}
2609 
2610 		iocbq = (IOCBQ *) iocbq->next;
2611 
2612 	}	/* end of while */
2613 
2614 	mutex_exit(&EMLXS_RINGTX_LOCK);
2615 
2616 	/* Now abort the iocb's outside the locks */
2617 	iocbq = (IOCBQ *)abort.q_first;
2618 	while (iocbq) {
2619 		/* Save the next iocbq for now */
2620 		next = (IOCBQ *)iocbq->next;
2621 
2622 		/* Unlink this iocbq */
2623 		iocbq->next = NULL;
2624 
2625 		/* Get the pkt */
2626 		sbp = (emlxs_buf_t *)iocbq->sbp;
2627 
2628 		if (sbp) {
2629 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2630 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2631 
2632 			if (hba->state >= FC_LINK_UP) {
2633 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2634 				    IOERR_ABORT_REQUESTED, 1);
2635 			} else {
2636 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2637 				    IOERR_LINK_DOWN, 1);
2638 			}
2639 		}
2640 
2641 		/* Free the iocb and its associated buffers */
2642 		else {
2643 			icmd = &iocbq->iocb;
2644 
2645 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2646 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2647 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2648 				if ((hba->flag &
2649 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2650 					/* HBA is detaching or offlining */
2651 					if (icmd->ulpCommand !=
2652 					    CMD_QUE_RING_LIST64_CN) {
2653 						uint8_t	*tmp;
2654 
2655 						for (i = 0;
2656 						    i < icmd->ulpBdeCount;
2657 						    i++) {
2658 							mp = EMLXS_GET_VADDR(
2659 							    hba, rp, icmd);
2660 
2661 							tmp = (uint8_t *)mp;
2662 							if (mp) {
2663 							(void) emlxs_mem_put(
2664 							    hba, MEM_BUF, tmp);
2665 							}
2666 						}
2667 					}
2668 
2669 					(void) emlxs_mem_put(hba, MEM_IOCB,
2670 					    (uint8_t *)iocbq);
2671 				} else {
2672 					/* repost the unsolicited buffer */
2673 					emlxs_sli_issue_iocb_cmd(hba, rp,
2674 					    iocbq);
2675 				}
2676 			}
2677 		}
2678 
2679 		iocbq = next;
2680 
2681 	}	/* end of while */
2682 
2683 
2684 	return (abort.q_cnt);
2685 
2686 }  /* emlxs_tx_lun_flush() */
2687 
2688 
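/*
 * Queue an iocb for transmit on its ring.
 *
 * The iocb is appended to its node's priority or normal tx queue, based on
 * the IOCB_PRIORITY flag, and the node is added to the ring's service queue
 * if it is not already on it. Packets for inactive nodes or packets marked
 * PACKET_IN_ABORT are completed or freed instead of being queued. If 'lock'
 * is nonzero the EMLXS_RINGTX_LOCK is acquired here; otherwise the caller
 * is assumed to hold it already.
 */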
2689 extern void
2690 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2691 {
2692 	emlxs_hba_t *hba;
2693 	emlxs_port_t *port;
2694 	uint32_t ringno;
2695 	NODELIST *nlp;
2696 	RING *rp;
2697 	emlxs_buf_t *sbp;
2698 
2699 	port = (emlxs_port_t *)iocbq->port;
2700 	hba = HBA;
2701 	rp = (RING *)iocbq->ring;
2702 	nlp = (NODELIST *)iocbq->node;
2703 	ringno = rp->ringno;
2704 	sbp = (emlxs_buf_t *)iocbq->sbp;
2705 
2706 	if (nlp == NULL) {
2707 		/* Set node to base node by default */
2708 		nlp = &port->node_base;
2709 
2710 		iocbq->node = (void *)nlp;
2711 
2712 		if (sbp) {
2713 			sbp->node = (void *)nlp;
2714 		}
2715 	}
2716 
2717 	if (lock) {
2718 		mutex_enter(&EMLXS_RINGTX_LOCK);
2719 	}
2720 
2721 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2722 		if (sbp) {
2723 			mutex_enter(&sbp->mtx);
2724 
2725 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2726 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2727 				hba->ring_tx_count[ringno]--;
2728 			}
2729 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2730 
2731 			mutex_exit(&sbp->mtx);
2732 
2733 			/* Free the ulpIoTag and the bmp */
2734 			(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
2735 
2736 			if (lock) {
2737 				mutex_exit(&EMLXS_RINGTX_LOCK);
2738 			}
2739 
2740 			if (hba->state >= FC_LINK_UP) {
2741 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2742 				    IOERR_ABORT_REQUESTED, 1);
2743 			} else {
2744 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2745 				    IOERR_LINK_DOWN, 1);
2746 			}
2747 			return;
2748 		} else {
2749 			if (lock) {
2750 				mutex_exit(&EMLXS_RINGTX_LOCK);
2751 			}
2752 
2753 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2754 		}
2755 
2756 		return;
2757 	}
2758 
2759 	if (sbp) {
2760 
2761 		mutex_enter(&sbp->mtx);
2762 
2763 		if (sbp->pkt_flags &
2764 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
2765 			mutex_exit(&sbp->mtx);
2766 			if (lock) {
2767 				mutex_exit(&EMLXS_RINGTX_LOCK);
2768 			}
2769 			return;
2770 		}
2771 
2772 		sbp->pkt_flags |= PACKET_IN_TXQ;
2773 		hba->ring_tx_count[ringno]++;
2774 
2775 		mutex_exit(&sbp->mtx);
2776 	}
2777 
2778 
2779 	/* Check iocbq priority */
2780 	if (iocbq->flag & IOCB_PRIORITY) {
2781 		/* Add the iocb to the bottom of the node's ptx queue */
2782 		if (nlp->nlp_ptx[ringno].q_first) {
2783 			((IOCBQ *)nlp->nlp_ptx[ringno].q_last)->next = iocbq;
2784 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2785 			nlp->nlp_ptx[ringno].q_cnt++;
2786 		} else {
2787 			nlp->nlp_ptx[ringno].q_first = (uint8_t *)iocbq;
2788 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2789 			nlp->nlp_ptx[ringno].q_cnt = 1;
2790 		}
2791 
2792 		iocbq->next = NULL;
2793 	} else {	/* Normal priority */
2794 
2795 
2796 		/* Add the iocb to the bottom of the node's tx queue */
2797 		if (nlp->nlp_tx[ringno].q_first) {
2798 			((IOCBQ *)nlp->nlp_tx[ringno].q_last)->next = iocbq;
2799 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2800 			nlp->nlp_tx[ringno].q_cnt++;
2801 		} else {
2802 			nlp->nlp_tx[ringno].q_first = (uint8_t *)iocbq;
2803 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2804 			nlp->nlp_tx[ringno].q_cnt = 1;
2805 		}
2806 
2807 		iocbq->next = NULL;
2808 	}
2809 
2810 
2811 	/*
	 * Check if the node is not already on the ring queue and
	 * (is not closed or is a priority request)
2814 	 */
2815 	if (!nlp->nlp_next[ringno] && (!(nlp->nlp_flag[ringno] & NLP_CLOSED) ||
2816 	    (iocbq->flag & IOCB_PRIORITY))) {
2817 		/* If so, then add it to the ring queue */
2818 		if (rp->nodeq.q_first) {
2819 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
2820 			    (uint8_t *)nlp;
2821 			nlp->nlp_next[ringno] = rp->nodeq.q_first;
2822 
2823 			/*
2824 			 * If this is not the base node then add it
2825 			 * to the tail
2826 			 */
2827 			if (!nlp->nlp_base) {
2828 				rp->nodeq.q_last = (uint8_t *)nlp;
2829 			} else {	/* Otherwise, add it to the head */
2830 
2831 				/* The command node always gets priority */
2832 				rp->nodeq.q_first = (uint8_t *)nlp;
2833 			}
2834 
2835 			rp->nodeq.q_cnt++;
2836 		} else {
2837 			rp->nodeq.q_first = (uint8_t *)nlp;
2838 			rp->nodeq.q_last = (uint8_t *)nlp;
2839 			nlp->nlp_next[ringno] = nlp;
2840 			rp->nodeq.q_cnt = 1;
2841 		}
2842 	}
2843 
2844 	HBASTATS.IocbTxPut[ringno]++;
2845 
2846 	/* Adjust the ring timeout timer */
2847 	rp->timeout = hba->timer_tics + 5;
2848 
2849 	if (lock) {
2850 		mutex_exit(&EMLXS_RINGTX_LOCK);
2851 	}
2852 
2853 	return;
2854 
2855 }  /* emlxs_tx_put() */
2856 
2857 
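/*
 * Get the next iocb to transmit on this ring.
 *
 * Nodes on the ring's service queue are handled round-robin. Each node's
 * priority queue is serviced before its normal tx queue, and a node marked
 * NLP_CLOSED only yields priority iocbs. The base node is drained before
 * the service queue pointers are advanced. If 'lock' is nonzero the
 * EMLXS_RINGTX_LOCK is acquired here.
 */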
2858 extern IOCBQ *
2859 emlxs_tx_get(RING *rp, uint32_t lock)
2860 {
2861 	emlxs_hba_t *hba;
2862 	uint32_t ringno;
2863 	IOCBQ *iocbq;
2864 	NODELIST *nlp;
2865 	emlxs_buf_t *sbp;
2866 
2867 	hba = rp->hba;
2868 	ringno = rp->ringno;
2869 
2870 	if (lock) {
2871 		mutex_enter(&EMLXS_RINGTX_LOCK);
2872 	}
2873 
2874 begin:
2875 
2876 	iocbq = NULL;
2877 
2878 	/* Check if a node needs servicing */
2879 	if (rp->nodeq.q_first) {
2880 		nlp = (NODELIST *)rp->nodeq.q_first;
2881 
2882 		/* Get next iocb from node's priority queue */
2883 
2884 		if (nlp->nlp_ptx[ringno].q_first) {
2885 			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;
2886 
2887 			/* Check if this is last entry */
2888 			if (nlp->nlp_ptx[ringno].q_last == (void *)iocbq) {
2889 				nlp->nlp_ptx[ringno].q_first = NULL;
2890 				nlp->nlp_ptx[ringno].q_last = NULL;
2891 				nlp->nlp_ptx[ringno].q_cnt = 0;
2892 			} else {
2893 				/* Remove iocb from head */
2894 				nlp->nlp_ptx[ringno].q_first =
2895 				    (void *)iocbq->next;
2896 				nlp->nlp_ptx[ringno].q_cnt--;
2897 			}
2898 
2899 			iocbq->next = NULL;
2900 		}
2901 
2902 		/* Get next iocb from node tx queue if node not closed */
2903 		else if (nlp->nlp_tx[ringno].q_first &&
2904 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED)) {
2905 			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;
2906 
2907 			/* Check if this is last entry */
2908 			if (nlp->nlp_tx[ringno].q_last == (void *)iocbq) {
2909 				nlp->nlp_tx[ringno].q_first = NULL;
2910 				nlp->nlp_tx[ringno].q_last = NULL;
2911 				nlp->nlp_tx[ringno].q_cnt = 0;
2912 			} else {
2913 				/* Remove iocb from head */
2914 				nlp->nlp_tx[ringno].q_first =
2915 				    (void *)iocbq->next;
2916 				nlp->nlp_tx[ringno].q_cnt--;
2917 			}
2918 
2919 			iocbq->next = NULL;
2920 		}
2921 
2922 		/* Now deal with node itself */
2923 
2924 		/* Check if node still needs servicing */
2925 		if ((nlp->nlp_ptx[ringno].q_first) ||
2926 		    (nlp->nlp_tx[ringno].q_first &&
2927 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
2928 
2929 			/*
2930 			 * If this is the base node, then don't shift the
2931 			 * pointers. We want to drain the base node before
2932 			 * moving on
2933 			 */
2934 			if (!nlp->nlp_base) {
2935 				/*
2936 				 * Just shift ring queue pointers to next
2937 				 * node
2938 				 */
2939 				rp->nodeq.q_last = (void *)nlp;
2940 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2941 			}
2942 		} else {
2943 			/* Remove node from ring queue */
2944 
2945 			/* If this is the last node on list */
2946 			if (rp->nodeq.q_last == (void *)nlp) {
2947 				rp->nodeq.q_last = NULL;
2948 				rp->nodeq.q_first = NULL;
2949 				rp->nodeq.q_cnt = 0;
2950 			} else {
2951 				/* Remove node from head */
2952 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2953 				((NODELIST *)rp->nodeq.q_last)->
2954 				    nlp_next[ringno] = rp->nodeq.q_first;
2955 				rp->nodeq.q_cnt--;
2956 
2957 			}
2958 
2959 			/* Clear node */
2960 			nlp->nlp_next[ringno] = NULL;
2961 		}
2962 
2963 		/*
2964 		 * If no iocbq was found on this node, then it will have
2965 		 * been removed. So try again.
2966 		 */
2967 		if (!iocbq) {
2968 			goto begin;
2969 		}
2970 
2971 		sbp = (emlxs_buf_t *)iocbq->sbp;
2972 
2973 		if (sbp) {
2974 			/*
2975 			 * Check flags before we enter mutex in case this
2976 			 * has been flushed and destroyed
2977 			 */
2978 			if ((sbp->pkt_flags &
2979 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2980 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2981 				goto begin;
2982 			}
2983 
2984 			mutex_enter(&sbp->mtx);
2985 
2986 			if ((sbp->pkt_flags &
2987 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2988 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2989 				mutex_exit(&sbp->mtx);
2990 				goto begin;
2991 			}
2992 
2993 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
2994 			hba->ring_tx_count[ringno]--;
2995 
2996 			mutex_exit(&sbp->mtx);
2997 		}
2998 	}
2999 
3000 	if (iocbq) {
3001 		HBASTATS.IocbTxGet[ringno]++;
3002 	}
3003 
3004 	/* Adjust the ring timeout timer */
3005 	rp->timeout = (rp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3006 
3007 	if (lock) {
3008 		mutex_exit(&EMLXS_RINGTX_LOCK);
3009 	}
3010 
3011 	return (iocbq);
3012 
3013 }  /* emlxs_tx_get() */
3014 
3015 
3016 
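/*
 * Close all outstanding chip IO's on all or a given ring for a given node.
 *
 * For each active iotag owned by the node a CLOSE_XRI_CN iocb is built via
 * emlxs_sbp_abort_add() while holding the EMLXS_FCTAB_LOCK, the close iocbs
 * are then queued for transmit, and ring service is triggered. Returns the
 * number of close iocbs queued.
 */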
3017 extern uint32_t
3018 emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring, NODELIST *ndlp,
3019     emlxs_buf_t *fpkt)
3020 {
3021 	emlxs_hba_t *hba = HBA;
3022 	emlxs_buf_t *sbp;
3023 	IOCBQ *iocbq;
3024 	IOCBQ *next;
3025 	Q abort;
3026 	RING *rp;
3027 	uint32_t ringno;
3028 	uint8_t flag[MAX_RINGS];
3029 	uint32_t iotag;
3030 
3031 	bzero((void *)&abort, sizeof (Q));
3032 	bzero((void *)flag, sizeof (flag));
3033 
3034 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3035 		rp = &hba->ring[ringno];
3036 
3037 		if (ring && rp != ring) {
3038 			continue;
3039 		}
3040 
3041 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3042 
3043 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3044 			sbp = rp->fc_table[iotag];
3045 
3046 			if (sbp && (sbp != STALE_PACKET) &&
3047 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3048 			    (sbp->node == ndlp) &&
3049 			    (sbp->ring == rp) &&
3050 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3051 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3052 				    fpkt);
3053 			}
3054 
3055 		}
3056 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3057 
3058 	}	/* for */
3059 
3060 	/* Now put the iocb's on the tx queue */
3061 	iocbq = (IOCBQ *)abort.q_first;
3062 	while (iocbq) {
3063 		/* Save the next iocbq for now */
3064 		next = (IOCBQ *)iocbq->next;
3065 
3066 		/* Unlink this iocbq */
3067 		iocbq->next = NULL;
3068 
3069 		/* Send this iocbq */
3070 		emlxs_tx_put(iocbq, 1);
3071 
3072 		iocbq = next;
3073 	}
3074 
3075 	/* Now trigger ring service */
3076 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3077 		if (!flag[ringno]) {
3078 			continue;
3079 		}
3080 
3081 		rp = &hba->ring[ringno];
3082 
3083 		emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3084 	}
3085 
3086 	return (abort.q_cnt);
3087 
3088 }  /* emlxs_chipq_node_flush() */
3089 
3090 
3091 /* Flush all IO's left on all iotag lists */
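/*
 * Each remaining packet is marked stale in fc_table, given a
 * LOCAL_REJECT/LINK_DOWN completion status, and handed to the ring's
 * interrupt thread for deferred completion. Returns the number of
 * IO's flushed.
 */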
3092 static uint32_t
3093 emlxs_iotag_flush(emlxs_hba_t *hba)
3094 {
3095 	emlxs_port_t *port = &PPORT;
3096 	emlxs_buf_t *sbp;
3097 	IOCBQ *iocbq;
3098 	IOCB *iocb;
3099 	Q abort;
3100 	RING *rp;
3101 	uint32_t ringno;
3102 	uint32_t iotag;
3103 	uint32_t count;
3104 
3105 	count = 0;
3106 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3107 		rp = &hba->ring[ringno];
3108 
3109 		bzero((void *)&abort, sizeof (Q));
3110 
3111 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3112 
3113 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3114 			sbp = rp->fc_table[iotag];
3115 
3116 			if (!sbp || (sbp == STALE_PACKET)) {
3117 				continue;
3118 			}
3119 
3120 			/* Unregister the packet */
3121 			rp->fc_table[iotag] = STALE_PACKET;
3122 			hba->io_count[ringno]--;
3123 			sbp->iotag = 0;
3124 
3125 			/* Clean up the sbp */
3126 			mutex_enter(&sbp->mtx);
3127 
3128 			/* Set IOCB status */
3129 			iocbq = &sbp->iocbq;
3130 			iocb = &iocbq->iocb;
3131 
3132 			iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
3133 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3134 			iocb->ulpLe = 1;
3135 			iocbq->next = NULL;
3136 
3137 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
3138 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
3139 				hba->ring_tx_count[ringno]--;
3140 			}
3141 
3142 			if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3143 				sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3144 			}
3145 
3146 			if (sbp->bmp) {
3147 				(void) emlxs_mem_put(hba, MEM_BPL,
3148 				    (uint8_t *)sbp->bmp);
3149 				sbp->bmp = 0;
3150 			}
3151 
3152 			/* At this point all nodes are assumed destroyed */
3153 			sbp->node = 0;
3154 
3155 			mutex_exit(&sbp->mtx);
3156 
3157 			/* Add this iocb to our local abort Q */
3158 			if (abort.q_first) {
3159 				((IOCBQ *)abort.q_last)->next = iocbq;
3160 				abort.q_last = (uint8_t *)iocbq;
3161 				abort.q_cnt++;
3162 			} else {
3163 				abort.q_first = (uint8_t *)iocbq;
3164 				abort.q_last = (uint8_t *)iocbq;
3165 				abort.q_cnt = 1;
3166 			}
3167 		}
3168 
3169 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3170 
3171 		/* Trigger deferred completion */
3172 		if (abort.q_first) {
3173 			mutex_enter(&rp->rsp_lock);
3174 			if (rp->rsp_head == NULL) {
3175 				rp->rsp_head = (IOCBQ *)abort.q_first;
3176 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3177 			} else {
3178 				rp->rsp_tail->next = (IOCBQ *)abort.q_first;
3179 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3180 			}
3181 			mutex_exit(&rp->rsp_lock);
3182 
3183 			emlxs_thread_trigger2(&rp->intr_thread,
3184 			    emlxs_proc_ring, rp);
3185 
3186 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3187 			    "Forced iotag completion. ring=%d count=%d",
3188 			    ringno, abort.q_cnt);
3189 
3190 			count += abort.q_cnt;
3191 		}
3192 	}
3193 
3194 	return (count);
3195 
3196 }  /* emlxs_iotag_flush() */
3197 
3198 
3199 
3200 /* Checks for IO's on all or a given ring for a given node */
3201 extern uint32_t
3202 emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp)
3203 {
3204 	emlxs_hba_t *hba = HBA;
3205 	emlxs_buf_t *sbp;
3206 	RING *rp;
3207 	uint32_t ringno;
3208 	uint32_t count;
3209 	uint32_t iotag;
3210 
3211 	count = 0;
3212 
3213 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3214 		rp = &hba->ring[ringno];
3215 
3216 		if (ring && rp != ring) {
3217 			continue;
3218 		}
3219 
3220 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3221 
3222 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3223 			sbp = rp->fc_table[iotag];
3224 
3225 			if (sbp && (sbp != STALE_PACKET) &&
3226 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3227 			    (sbp->node == ndlp) &&
3228 			    (sbp->ring == rp) &&
3229 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3230 				count++;
3231 			}
3232 
3233 		}
3234 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3235 
3236 	}	/* for */
3237 
3238 	return (count);
3239 
3240 }  /* emlxs_chipq_node_check() */
3241 
3242 
3243 
3244 /* Flush all IO's for a given node's lun (FC_FCP_RING only) */
3245 extern uint32_t
3246 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
3247     emlxs_buf_t *fpkt)
3248 {
3249 	emlxs_hba_t *hba = HBA;
3250 	emlxs_buf_t *sbp;
3251 	RING *rp;
3252 	IOCBQ *iocbq;
3253 	IOCBQ *next;
3254 	Q abort;
3255 	uint32_t iotag;
3256 	uint8_t flag[MAX_RINGS];
3257 
3258 	bzero((void *)flag, sizeof (flag));
3259 	bzero((void *)&abort, sizeof (Q));
3260 	rp = &hba->ring[FC_FCP_RING];
3261 
3262 	mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3263 	for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3264 		sbp = rp->fc_table[iotag];
3265 
3266 		if (sbp && (sbp != STALE_PACKET) &&
3267 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3268 		    sbp->node == ndlp &&
3269 		    sbp->ring == rp &&
3270 		    sbp->lun == lun &&
3271 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3272 			emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
3273 		}
3274 	}
3275 	mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3276 
3277 	/* Now put the iocb's on the tx queue */
3278 	iocbq = (IOCBQ *)abort.q_first;
3279 	while (iocbq) {
3280 		/* Save the next iocbq for now */
3281 		next = (IOCBQ *)iocbq->next;
3282 
3283 		/* Unlink this iocbq */
3284 		iocbq->next = NULL;
3285 
3286 		/* Send this iocbq */
3287 		emlxs_tx_put(iocbq, 1);
3288 
3289 		iocbq = next;
3290 	}
3291 
3292 	/* Now trigger ring service */
3293 	if (abort.q_cnt) {
3294 		emlxs_sli_issue_iocb_cmd(hba, rp, 0);
3295 	}
3296 
3297 	return (abort.q_cnt);
3298 
3299 }  /* emlxs_chipq_lun_flush() */
3300 
3301 
3302 
3303 /*
 * Create an ABORT_XRI_CN iocb to abort an FCP command that has already been
 * issued to the adapter. This must be called while holding the
 * EMLXS_FCTAB_LOCK.
3306  */
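/*
 * Illustrative usage sketch (not copied from a specific caller): build the
 * abort while holding the iotag table lock, then issue it after dropping
 * the lock, e.g.
 *
 *	iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, rp,
 *	    CLASS3, ABORT_TYPE_ABTS);
 *	mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
 *	if (iocbq) {
 *		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
 *	}
 */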
3307 extern IOCBQ *
3308 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3309     uint16_t iotag, RING *rp, uint8_t class, int32_t flag)
3310 {
3311 	emlxs_hba_t *hba = HBA;
3312 	IOCBQ *iocbq;
3313 	IOCB *iocb;
3314 	uint16_t abort_iotag;
3315 
3316 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3317 		return (NULL);
3318 	}
3319 
3320 	iocbq->ring = (void *)rp;
3321 	iocbq->port = (void *)port;
3322 	iocbq->node = (void *)ndlp;
3323 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3324 	iocb = &iocbq->iocb;
3325 
3326 	/*
3327 	 * set up an iotag using special Abort iotags
3328 	 */
3329 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3330 		rp->fc_abort_iotag = rp->max_iotag;
3331 	}
3332 
3333 	abort_iotag = rp->fc_abort_iotag++;
3334 
3335 
3336 	iocb->ulpIoTag = abort_iotag;
3337 	iocb->un.acxri.abortType = flag;
3338 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3339 	iocb->un.acxri.abortIoTag = iotag;
3340 	iocb->ulpLe = 1;
3341 	iocb->ulpClass = class;
3342 	iocb->ulpCommand = CMD_ABORT_XRI_CN;
3343 	iocb->ulpOwner = OWN_CHIP;
3344 
3345 	return (iocbq);
3346 
3347 }  /* emlxs_create_abort_xri_cn() */
3348 
3349 
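/*
 * Create an ABORT_XRI_CX iocb to abort an exchange identified by its
 * exchange id (xid) rather than by iotag.
 */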
3350 extern IOCBQ *
3351 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3352     RING *rp, uint8_t class, int32_t flag)
3353 {
3354 	emlxs_hba_t *hba = HBA;
3355 	IOCBQ *iocbq;
3356 	IOCB *iocb;
3357 	uint16_t abort_iotag;
3358 
3359 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3360 		return (NULL);
3361 	}
3362 
3363 	iocbq->ring = (void *)rp;
3364 	iocbq->port = (void *)port;
3365 	iocbq->node = (void *)ndlp;
3366 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3367 	iocb = &iocbq->iocb;
3368 
3369 	/*
3370 	 * set up an iotag using special Abort iotags
3371 	 */
3372 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3373 		rp->fc_abort_iotag = rp->max_iotag;
3374 	}
3375 
3376 	abort_iotag = rp->fc_abort_iotag++;
3377 
3378 	iocb->ulpContext = xid;
3379 	iocb->ulpIoTag = abort_iotag;
3380 	iocb->un.acxri.abortType = flag;
3381 	iocb->ulpLe = 1;
3382 	iocb->ulpClass = class;
3383 	iocb->ulpCommand = CMD_ABORT_XRI_CX;
3384 	iocb->ulpOwner = OWN_CHIP;
3385 
3386 	return (iocbq);
3387 
3388 }  /* emlxs_create_abort_xri_cx() */
3389 
3390 
3391 
/* This must be called while holding the EMLXS_FCTAB_LOCK */
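/*
 * Create a CLOSE_XRI_CN iocb to close the exchange associated with the
 * given iotag (a close, unlike an abort, does not send an ABTS on the
 * wire).
 */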
3393 extern IOCBQ *
3394 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3395     uint16_t iotag, RING *rp)
3396 {
3397 	emlxs_hba_t *hba = HBA;
3398 	IOCBQ *iocbq;
3399 	IOCB *iocb;
3400 	uint16_t abort_iotag;
3401 
3402 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3403 		return (NULL);
3404 	}
3405 
3406 	iocbq->ring = (void *)rp;
3407 	iocbq->port = (void *)port;
3408 	iocbq->node = (void *)ndlp;
3409 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3410 	iocb = &iocbq->iocb;
3411 
3412 	/*
3413 	 * set up an iotag using special Abort iotags
3414 	 */
3415 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3416 		rp->fc_abort_iotag = rp->max_iotag;
3417 	}
3418 
3419 	abort_iotag = rp->fc_abort_iotag++;
3420 
3421 	iocb->ulpIoTag = abort_iotag;
3422 	iocb->un.acxri.abortType = 0;
3423 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3424 	iocb->un.acxri.abortIoTag = iotag;
3425 	iocb->ulpLe = 1;
3426 	iocb->ulpClass = 0;
3427 	iocb->ulpCommand = CMD_CLOSE_XRI_CN;
3428 	iocb->ulpOwner = OWN_CHIP;
3429 
3430 	return (iocbq);
3431 
3432 }  /* emlxs_create_close_xri_cn() */
3433 
3434 
/* This must be called while holding the EMLXS_FCTAB_LOCK */
3436 extern IOCBQ *
3437 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3438     RING *rp)
3439 {
3440 	emlxs_hba_t *hba = HBA;
3441 	IOCBQ *iocbq;
3442 	IOCB *iocb;
3443 	uint16_t abort_iotag;
3444 
3445 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3446 		return (NULL);
3447 	}
3448 
3449 	iocbq->ring = (void *)rp;
3450 	iocbq->port = (void *)port;
3451 	iocbq->node = (void *)ndlp;
3452 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3453 	iocb = &iocbq->iocb;
3454 
3455 	/*
3456 	 * set up an iotag using special Abort iotags
3457 	 */
3458 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3459 		rp->fc_abort_iotag = rp->max_iotag;
3460 	}
3461 
3462 	abort_iotag = rp->fc_abort_iotag++;
3463 
3464 	iocb->ulpContext = xid;
3465 	iocb->ulpIoTag = abort_iotag;
3466 	iocb->ulpLe = 1;
3467 	iocb->ulpClass = 0;
3468 	iocb->ulpCommand = CMD_CLOSE_XRI_CX;
3469 	iocb->ulpOwner = OWN_CHIP;
3470 
3471 	return (iocbq);
3472 
3473 }  /* emlxs_create_close_xri_cx() */
3474 
3475 
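/*
 * Abort an unsolicited CT exchange by its receive exchange id. If the link
 * is up an ABORT_XRI_CX is issued; otherwise the exchange is closed with a
 * CLOSE_XRI_CX.
 */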
3476 void
3477 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
3478 {
3479 	RING *rp;
3480 	IOCBQ *iocbq;
3481 
3482 	rp = &hba->ring[FC_CT_RING];
3483 
3484 	/* Create the abort IOCB */
3485 	if (hba->state >= FC_LINK_UP) {
3486 		iocbq =
3487 		    emlxs_create_abort_xri_cx(port, NULL, rxid, rp, CLASS3,
3488 		    ABORT_TYPE_ABTS);
3489 	} else {
3490 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, rp);
3491 	}
3492 	if (iocbq) {
3493 		emlxs_sli_issue_iocb_cmd(hba, rp, iocbq);
3494 	}
3495 }
3496 
3497 
/* This must be called while holding the EMLXS_FCTAB_LOCK */
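/*
 * Build a CLOSE_XRI_CN iocb for the given packet and add it to the caller's
 * local abort queue. The packet is marked PACKET_XRI_CLOSED with an abort
 * timeout, accounted for on the flush packet (fpkt) if one is given, and
 * the packet's ring is flagged so the caller can trigger ring service
 * afterwards.
 */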
3499 static void
3500 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
3501     uint8_t *flag, emlxs_buf_t *fpkt)
3502 {
3503 	emlxs_hba_t *hba = HBA;
3504 	IOCBQ *iocbq;
3505 	RING *rp;
3506 	NODELIST *ndlp;
3507 
3508 	rp = (RING *)sbp->ring;
3509 	ndlp = sbp->node;
3510 
3511 	/* Create the close XRI IOCB */
3512 	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp);
3513 
3514 	/*
3515 	 * Add this iocb to our local abort Q
3516 	 * This way we don't hold the CHIPQ lock too long
3517 	 */
3518 	if (iocbq) {
3519 		if (abort->q_first) {
3520 			((IOCBQ *)abort->q_last)->next = iocbq;
3521 			abort->q_last = (uint8_t *)iocbq;
3522 			abort->q_cnt++;
3523 		} else {
3524 			abort->q_first = (uint8_t *)iocbq;
3525 			abort->q_last = (uint8_t *)iocbq;
3526 			abort->q_cnt = 1;
3527 		}
3528 		iocbq->next = NULL;
3529 	}
3530 
3531 	/* set the flags */
3532 	mutex_enter(&sbp->mtx);
3533 
3534 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
3535 	sbp->ticks = hba->timer_tics + 10;
3536 	sbp->abort_attempts++;
3537 
3538 	flag[rp->ringno] = 1;
3539 
3540 	/*
3541 	 * If the fpkt is already set, then we will leave it alone
3542 	 * This ensures that this pkt is only accounted for on one
3543 	 * fpkt->flush_count
3544 	 */
3545 	if (!sbp->fpkt && fpkt) {
3546 		mutex_enter(&fpkt->mtx);
3547 		sbp->fpkt = fpkt;
3548 		fpkt->flush_count++;
3549 		mutex_exit(&fpkt->mtx);
3550 	}
3551 
3552 	mutex_exit(&sbp->mtx);
3553 
3554 	return;
3555 
3556 }  /* emlxs_sbp_abort_add() */
3557