1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #include "emlxs.h"
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
35 
36 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
37     uint8_t *flag, emlxs_buf_t *fpkt);
38 static uint32_t emlxs_iotag_flush(emlxs_hba_t *hba);
39 
40 /*
41  * This routine copies data from src then potentially swaps the destination to
42  * big endian. Assumes cnt is a multiple of * sizeof(uint32_t).
43  */
44 extern void
45 emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
46 {
47 	uint32_t ldata;
48 	int32_t i;
49 
50 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
51 		ldata = *src++;
52 		ldata = PCIMEM_LONG(ldata);
53 		*dest++ = ldata;
54 	}
55 } /* emlxs_pcimem_bcopy */
56 
57 
58 /*
59  * This routine copies data from src then swaps the destination to big endian.
60  * Assumes cnt is a multiple of sizeof(uint32_t).
61  */
62 extern void
63 emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
64 {
65 	uint32_t ldata;
66 	int32_t i;
67 
68 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
69 		ldata = *src++;
70 		ldata = SWAP_DATA32(ldata);
71 		*dest++ = ldata;
72 	}
73 } /* End fc_swap_bcopy */
74 
75 
76 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
77 #define	SCSI_INQUIRY	0x12
78 #define	SCSI_RX_DIAG    0x1C
79 
80 
81 /*
82  *  emlxs_handle_fcp_event
83  *
84  *  Description: Process an FCP Rsp Ring completion
85  *
86  */
87 /* ARGSUSED */
88 extern void
89 emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
90 {
91 	emlxs_port_t *port = &PPORT;
92 	IOCB *cmd;
93 	emlxs_buf_t *sbp;
94 	fc_packet_t *pkt = NULL;
95 	uint32_t iostat;
96 	uint8_t localstat;
97 	fcp_rsp_t *rsp;
98 	uint32_t rsp_data_resid;
99 	uint32_t check_underrun;
100 	uint8_t asc;
101 	uint8_t ascq;
102 	uint8_t scsi_status;
103 	uint8_t sense;
104 	uint32_t did;
105 	uint32_t fix_it;
106 	uint8_t *scsi_cmd;
107 	uint8_t scsi_opcode;
108 	uint16_t scsi_dl;
109 	uint32_t data_rx;
110 
111 	cmd = &iocbq->iocb;
112 
113 	/* Initialize the status */
114 	iostat = cmd->ulpStatus;
115 	localstat = 0;
116 	scsi_status = 0;
117 	asc = 0;
118 	ascq = 0;
119 	sense = 0;
120 	check_underrun = 0;
121 	fix_it = 0;
122 
123 	HBASTATS.FcpEvent++;
124 
125 	sbp = (emlxs_buf_t *)iocbq->sbp;
126 
127 	if (!sbp) {
128 		/* completion with missing xmit command */
129 		HBASTATS.FcpStray++;
130 
131 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
132 		    "cmd=%x iotag=%x",
133 		    cmd->ulpCommand, cmd->ulpIoTag);
134 
135 		return;
136 	}
137 	HBASTATS.FcpCompleted++;
138 
139 	pkt = PRIV2PKT(sbp);
140 
141 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
142 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
143 	scsi_opcode = scsi_cmd[12];
144 	data_rx = 0;
145 
146 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
147 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
148 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
149 		    DDI_DMA_SYNC_FORKERNEL);
150 
151 #ifdef TEST_SUPPORT
152 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
153 		    (pkt->pkt_datalen >= 512)) {
154 			hba->underrun_counter--;
155 			iostat = IOSTAT_FCP_RSP_ERROR;
156 
157 			/* Report 512 bytes missing by adapter */
158 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
159 
160 			/* Corrupt 512 bytes of Data buffer */
161 			bzero((uint8_t *)pkt->pkt_data, 512);
162 
163 			/* Set FCP response to STATUS_GOOD */
164 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
165 		}
166 #endif	/* TEST_SUPPORT */
167 	}
168 	/* Process the pkt */
169 	mutex_enter(&sbp->mtx);
170 
171 	/* Check for immediate return */
172 	if ((iostat == IOSTAT_SUCCESS) &&
173 	    (pkt->pkt_comp) &&
174 	    !(sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
175 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
176 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
177 	    PACKET_IN_ABORT | PACKET_POLLED))) {
178 		HBASTATS.FcpGood++;
179 
180 		sbp->pkt_flags |= (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
181 		    PACKET_COMPLETED | PACKET_RETURNED);
182 		mutex_exit(&sbp->mtx);
183 
184 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
185 		emlxs_unswap_pkt(sbp);
186 #endif	/* EMLXS_MODREV2X */
187 
188 		(*pkt->pkt_comp) (pkt);
189 
190 		return;
191 	}
192 	/*
193 	 * A response is only placed in the resp buffer if
194 	 * IOSTAT_FCP_RSP_ERROR is reported
195 	 */
196 
197 	/* Check if a response buffer was provided */
198 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
199 		emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
200 		    DDI_DMA_SYNC_FORKERNEL);
201 
202 		/* Get the response buffer pointer */
203 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
204 
205 		/* Set the valid response flag */
206 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
207 
208 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
209 
210 		/*
211 		 * Convert a task abort to a check condition with no data
212 		 * transferred
213 		 */
214 		/*
215 		 * We saw a data corruption when Solaris received a Task
216 		 * Abort from a tape
217 		 */
218 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
219 			EMLXS_MSGF(EMLXS_CONTEXT,
220 			    &emlxs_fcp_completion_error_msg,
221 			    "Task Abort. Fixed. "
222 			    "did=0x%06x sbp=%p cmd=%02x dl=%d",
223 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
224 
225 			rsp->fcp_u.fcp_status.scsi_status =
226 			    SCSI_STAT_CHECK_COND;
227 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
228 			rsp->fcp_u.fcp_status.sense_len_set = 0;
229 			rsp->fcp_u.fcp_status.resid_over = 0;
230 
231 			if (pkt->pkt_datalen) {
232 				rsp->fcp_u.fcp_status.resid_under = 1;
233 				rsp->fcp_resid = SWAP_DATA32(pkt->pkt_datalen);
234 			} else {
235 				rsp->fcp_u.fcp_status.resid_under = 0;
236 				rsp->fcp_resid = 0;
237 			}
238 
239 			scsi_status = SCSI_STAT_CHECK_COND;
240 		}
241 		/*
242 		 * We only need to check underrun if data could have been
243 		 * sent
244 		 */
245 
246 		/* Always check underrun if status is good */
247 		if (scsi_status == SCSI_STAT_GOOD) {
248 			check_underrun = 1;
249 		}
250 		/* Check the sense codes if this is a check condition */
251 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
252 			check_underrun = 1;
253 
254 			/* Check if sense data was provided */
255 			if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
256 				sense = *((uint8_t *)rsp + 32 + 2);
257 				asc = *((uint8_t *)rsp + 32 + 12);
258 				ascq = *((uint8_t *)rsp + 32 + 13);
259 			}
260 		}
261 		/* Status is not good and this is not a check condition */
262 		/* No data should have been sent */
263 		else {
264 			check_underrun = 0;
265 		}
266 
267 		/* Get the residual underrun count reported by the SCSI reply */
268 		rsp_data_resid = (pkt->pkt_datalen &&
269 		    rsp->fcp_u.fcp_status.resid_under)
270 		    ? SWAP_DATA32(rsp->fcp_resid) : 0;
271 
272 		/* Set the pkt resp_resid field */
273 		pkt->pkt_resp_resid = 0;
274 
275 		/* Set the pkt data_resid field */
276 		if (pkt->pkt_datalen &&
277 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
278 			/*
279 			 * Get the residual underrun count reported by our
280 			 * adapter
281 			 */
282 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
283 
284 			/* Get the actual amount of data transferred */
285 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
286 
287 			/*
288 			 * If the residual being reported by the adapter is
289 			 * greater than the residual being reported in the
290 			 * reply, then we have a true underrun.
291 			 */
292 			if (check_underrun &&
293 			    (pkt->pkt_data_resid > rsp_data_resid)) {
294 				switch (scsi_opcode) {
295 				case SCSI_INQUIRY:
296 					scsi_dl = scsi_cmd[16];
297 					break;
298 
299 				case SCSI_RX_DIAG:
300 					scsi_dl = (scsi_cmd[15] * 0x100) +
301 					    scsi_cmd[16];
302 					break;
303 
304 				default:
305 					scsi_dl = pkt->pkt_datalen;
306 				}
307 
308 #ifdef FCP_UNDERRUN_PATCH1
309 				/*
310 				 * If status is not good and no data was
311 				 * actually transferred, then we must fix the
312 				 * issue
313 				 */
314 				if ((scsi_status != SCSI_STAT_GOOD) &&
315 				    (data_rx == 0)) {
316 					fix_it = 1;
317 
318 					EMLXS_MSGF(EMLXS_CONTEXT,
319 					    &emlxs_fcp_completion_error_msg,
320 					    "Underrun(1). Fixed. did=0x%06x "
321 					    "sbp=%p cmd=%02x dl=%d,%d rx=%d "
322 					    "rsp=%d",
323 					    did, sbp, scsi_opcode,
324 					    pkt->pkt_datalen, scsi_dl,
325 					    (pkt->pkt_datalen -
326 					    cmd->un.fcpi.fcpi_parm),
327 					    rsp_data_resid);
328 
329 				}
330 #endif	/* FCP_UNDERRUN_PATCH1 */
331 
332 
333 #ifdef FCP_UNDERRUN_PATCH2
334 				if ((scsi_status == SCSI_STAT_GOOD)) {
335 					emlxs_msg_t *msg;
336 
337 					msg = &emlxs_fcp_completion_error_msg;
338 					/*
339 					 * If status is good and this is an
340 					 * inquiry request and the amount of
341 					 * data
342 					 */
343 					/*
344 					 * requested <= data received, then
345 					 * we must fix the issue.
346 					 */
347 
348 					if ((scsi_opcode == SCSI_INQUIRY) &&
349 					    (pkt->pkt_datalen >= data_rx) &&
350 					    (scsi_dl <= data_rx)) {
351 						fix_it = 1;
352 
353 						EMLXS_MSGF(EMLXS_CONTEXT,
354 						    msg,
355 						    "Underrun(2). Fixed. "
356 						    "did=0x%06x sbp=%p "
357 						    "cmd=%02x dl=%d,%d "
358 						    "rx=%d rsp=%d",
359 						    did, sbp, scsi_opcode,
360 						    pkt->pkt_datalen, scsi_dl,
361 						    data_rx, rsp_data_resid);
362 
363 					}
364 					/*
365 					 * If status is good and this is an
366 					 * inquiry request and the amount of
367 					 * data
368 					 */
369 					/*
370 					 * requested >= 128 bytes, but only
371 					 * 128 bytes were received,
372 					 */
373 					/* then we must fix the issue. */
374 					else if ((scsi_opcode == SCSI_INQUIRY)&&
375 					    (pkt->pkt_datalen >= 128) &&
376 					    (scsi_dl >= 128) &&
377 					    (data_rx == 128)) {
378 						fix_it = 1;
379 
380 						EMLXS_MSGF(EMLXS_CONTEXT,
381 						    msg,
382 						    "Underrun(3). Fixed. "
383 						    "did=0x%06x sbp=%p "
384 						    "cmd=%02x dl=%d,%d rx=%d "
385 						    "rsp=%d",
386 						    did, sbp, scsi_opcode,
387 						    pkt->pkt_datalen, scsi_dl,
388 						    data_rx, rsp_data_resid);
389 
390 					}
391 				}
392 #endif	/* FCP_UNDERRUN_PATCH2 */
393 
394 				/*
395 				 * Check if SCSI response payload should be
396 				 * fixed or
397 				 */
398 				/* if a DATA_UNDERRUN should be reported */
399 				if (fix_it) {
400 					/*
401 					 * Fix the SCSI response payload
402 					 * itself
403 					 */
404 					rsp->fcp_u.fcp_status.resid_under = 1;
405 					rsp->fcp_resid =
406 					    SWAP_DATA32(pkt->pkt_data_resid);
407 				} else {
408 					/*
409 					 * Change the status from
410 					 * IOSTAT_FCP_RSP_ERROR to
411 					 * IOSTAT_DATA_UNDERRUN
412 					 */
413 					iostat = IOSTAT_DATA_UNDERRUN;
414 					pkt->pkt_data_resid = pkt->pkt_datalen;
415 				}
416 			}
417 			/*
418 			 * If the residual being reported by the adapter is
419 			 * less than the residual being reported in the
420 			 * reply, then we have a true overrun. Since we don't
421 			 * know where the extra data came from or went to
422 			 * then we cannot trust anything we received
423 			 */
424 			else if (rsp_data_resid > pkt->pkt_data_resid) {
425 				/*
426 				 * Change the status from
427 				 * IOSTAT_FCP_RSP_ERROR to
428 				 * IOSTAT_DATA_OVERRUN
429 				 */
430 				iostat = IOSTAT_DATA_OVERRUN;
431 				pkt->pkt_data_resid = pkt->pkt_datalen;
432 			}
433 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
434 			/* Report whatever the target reported */
435 			pkt->pkt_data_resid = rsp_data_resid;
436 		}
437 	}
438 	/*
439 	 * If pkt is tagged for timeout then set the return codes
440 	 * appropriately
441 	 */
442 	if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
443 		iostat = IOSTAT_LOCAL_REJECT;
444 		localstat = IOERR_ABORT_TIMEOUT;
445 		goto done;
446 	}
447 	/* If pkt is tagged for abort then set the return codes appropriately */
448 	if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
449 		iostat = IOSTAT_LOCAL_REJECT;
450 		localstat = IOERR_ABORT_REQUESTED;
451 		goto done;
452 	}
453 	/* Print completion message */
454 	switch (iostat) {
455 	case IOSTAT_SUCCESS:
456 		/* Build SCSI GOOD status */
457 		if (pkt->pkt_rsplen) {
458 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
459 		}
460 		break;
461 
462 	case IOSTAT_FCP_RSP_ERROR:
463 		break;
464 
465 	case IOSTAT_REMOTE_STOP:
466 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
467 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x",
468 		    did, sbp, scsi_opcode);
469 		break;
470 
471 	case IOSTAT_LOCAL_REJECT:
472 		localstat = cmd->un.grsp.perr.statLocalError;
473 
474 		switch (localstat) {
475 		case IOERR_SEQUENCE_TIMEOUT:
476 			EMLXS_MSGF(EMLXS_CONTEXT,
477 			    &emlxs_fcp_completion_error_msg,
478 			    "Local reject. %s did=0x%06x sbp=%p "
479 			    "cmd=%02x tmo=%d ",
480 			    emlxs_error_xlate(localstat), did, sbp,
481 			    scsi_opcode, pkt->pkt_timeout);
482 			break;
483 
484 		default:
485 			EMLXS_MSGF(EMLXS_CONTEXT,
486 			    &emlxs_fcp_completion_error_msg,
487 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
488 			    emlxs_error_xlate(localstat), did,
489 			    sbp, scsi_opcode);
490 		}
491 
492 		break;
493 
494 	case IOSTAT_NPORT_RJT:
495 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
496 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x",
497 		    did, sbp, scsi_opcode);
498 		break;
499 
500 	case IOSTAT_FABRIC_RJT:
501 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
502 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x",
503 		    did, sbp, scsi_opcode);
504 		break;
505 
506 	case IOSTAT_NPORT_BSY:
507 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
508 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x",
509 		    did, sbp, scsi_opcode);
510 		break;
511 
512 	case IOSTAT_FABRIC_BSY:
513 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
514 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x",
515 		    did, sbp, scsi_opcode);
516 		break;
517 
518 	case IOSTAT_INTERMED_RSP:
519 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
520 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x",
521 		    did, sbp, scsi_opcode);
522 		break;
523 
524 	case IOSTAT_LS_RJT:
525 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
526 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x",
527 		    did, sbp, scsi_opcode);
528 		break;
529 
530 	case IOSTAT_DATA_UNDERRUN:
531 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
532 		    "Underrun. did=0x%06x sbp=%p cmd=%02x dl=%d,%d rx=%d "
533 		    "rsp=%d (%02x,%02x,%02x,%02x)",
534 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl,
535 		    data_rx, rsp_data_resid, scsi_status, sense, asc, ascq);
536 		break;
537 
538 	case IOSTAT_DATA_OVERRUN:
539 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
540 		    "Overrun. did=0x%06x sbp=%p cmd=%02x dl=%d,%d rx=%d "
541 		    "rsp=%d (%02x,%02x,%02x,%02x)",
542 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl,
543 		    data_rx, rsp_data_resid, scsi_status, sense, asc, ascq);
544 		break;
545 
546 	default:
547 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
548 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
549 		    iostat, cmd->un.grsp.perr.statLocalError, did,
550 		    sbp, scsi_opcode);
551 		break;
552 	}
553 
554 done:
555 
556 	if (iostat == IOSTAT_SUCCESS) {
557 		HBASTATS.FcpGood++;
558 	} else {
559 		HBASTATS.FcpError++;
560 	}
561 
562 	mutex_exit(&sbp->mtx);
563 
564 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
565 
566 	return;
567 
568 } /* emlxs_handle_fcp_event() */
569 
570 
571 
572 /*
573  *  emlxs_post_buffer
574  *
575  *  This routine will post count buffers to the
576  *  ring with the QUE_RING_BUF_CN command. This
577  *  allows 2 buffers / command to be posted.
578  *  Returns the number of buffers NOT posted.
579  */
580 extern int
581 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
582 {
583 	emlxs_port_t *port = &PPORT;
584 	IOCB *icmd;
585 	IOCBQ *iocbq;
586 	MATCHMAP *mp;
587 	uint16_t tag;
588 	uint32_t maxqbuf;
589 	int32_t i;
590 	int32_t j;
591 	uint32_t seg;
592 	uint32_t size;
593 
594 	mp = 0;
595 	maxqbuf = 2;
596 	tag = (uint16_t)cnt;
597 	cnt += rp->fc_missbufcnt;
598 
599 	if (rp->ringno == FC_ELS_RING) {
600 		seg = MEM_BUF;
601 		size = MEM_ELSBUF_SIZE;
602 	} else if (rp->ringno == FC_IP_RING) {
603 		seg = MEM_IPBUF;
604 		size = MEM_IPBUF_SIZE;
605 	} else if (rp->ringno == FC_CT_RING) {
606 		seg = MEM_CTBUF;
607 		size = MEM_CTBUF_SIZE;
608 	}
609 #ifdef SFCT_SUPPORT
610 	else if (rp->ringno == FC_FCT_RING) {
611 		seg = MEM_FCTBUF;
612 		size = MEM_FCTBUF_SIZE;
613 	}
614 #endif	/* SFCT_SUPPORT */
615 	else {
616 		return (0);
617 	}
618 
619 	/*
620 	 * While there are buffers to post
621 	 */
622 	while (cnt) {
623 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
624 			rp->fc_missbufcnt = cnt;
625 			return (cnt);
626 		}
627 		iocbq->ring = (void *)rp;
628 		iocbq->port = (void *)port;
629 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
630 
631 		icmd = &iocbq->iocb;
632 
633 		/*
634 		 * Max buffers can be posted per command
635 		 */
636 		for (i = 0; i < maxqbuf; i++) {
637 			if (cnt <= 0)
638 				break;
639 
640 			/* fill in BDEs for command */
641 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
642 				uint32_t H;
643 				uint32_t L;
644 
645 				icmd->ulpBdeCount = i;
646 				for (j = 0; j < i; j++) {
647 					H = icmd->un.cont64[j].addrHigh;
648 					L = icmd->un.cont64[j].addrLow;
649 					mp = emlxs_mem_get_vaddr(hba, rp,
650 					    getPaddr(H, L));
651 					if (mp) {
652 						(void) emlxs_mem_put(hba, seg,
653 						    (uint8_t *)mp);
654 					}
655 				}
656 
657 				rp->fc_missbufcnt = cnt + i;
658 
659 				(void) emlxs_mem_put(hba, MEM_IOCB,
660 				    (uint8_t *)iocbq);
661 
662 				return (cnt + i);
663 			}
664 			/*
665 			 * map that page and save the address pair for lookup
666 			 * later
667 			 */
668 			emlxs_mem_map_vaddr(hba, rp, mp,
669 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
670 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
671 
672 			icmd->un.cont64[i].tus.f.bdeSize = size;
673 			icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
674 
675 /*
676  *			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
677  *			    "UB Post: ring=%d addr=%08x%08x size=%d",
678  *			    rp->ringno, icmd->un.cont64[i].addrHigh,
679  *			    icmd->un.cont64[i].addrLow, size);
680  */
681 
682 			cnt--;
683 		}
684 
685 		icmd->ulpIoTag = tag;
686 		icmd->ulpBdeCount = i;
687 		icmd->ulpLe = 1;
688 		icmd->ulpOwner = OWN_CHIP;
689 		iocbq->bp = (uint8_t *)mp;  /* used for delimiter between */
690 					    /* commands */
691 
692 		emlxs_issue_iocb_cmd(hba, rp, iocbq);
693 	}
694 
695 	rp->fc_missbufcnt = 0;
696 
697 	return (0);
698 
699 } /* emlxs_post_buffer() */
700 
701 
702 extern int
703 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
704 {
705 	emlxs_hba_t *hba = HBA;
706 	emlxs_config_t *cfg;
707 	NODELIST *nlp;
708 	fc_affected_id_t *aid;
709 	uint32_t mask;
710 	uint32_t aff_d_id;
711 	uint32_t linkdown;
712 	uint32_t vlinkdown;
713 	uint32_t action;
714 	int i;
715 	uint32_t unreg_vpi;
716 	uint32_t update;
717 	uint32_t adisc_support;
718 
719 	/* Target mode only uses this routine for linkdowns */
720 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
721 		return (0);
722 	}
723 	cfg = &CFG;
724 	aid = (fc_affected_id_t *)&scope;
725 	linkdown = 0;
726 	vlinkdown = 0;
727 	unreg_vpi = 0;
728 	update = 0;
729 
730 	if (!(port->flag & EMLXS_PORT_BOUND)) {
731 		return (0);
732 	}
733 	switch (aid->aff_format) {
734 	case 0:	/* Port */
735 		mask = 0x00ffffff;
736 		break;
737 
738 	case 1:	/* Area */
739 		mask = 0x00ffff00;
740 		break;
741 
742 	case 2:	/* Domain */
743 		mask = 0x00ff0000;
744 		break;
745 
746 	case 3:	/* Network */
747 		mask = 0x00000000;
748 		break;
749 
750 #ifdef DHCHAP_SUPPORT
751 	case 0xfe:	/* Virtual link down */
752 		mask = 0x00000000;
753 		vlinkdown = 1;
754 		break;
755 #endif	/* DHCHAP_SUPPORT */
756 
757 	case 0xff:	/* link is down */
758 		mask = 0x00000000;
759 		linkdown = 1;
760 		break;
761 
762 	}
763 
764 	aff_d_id = aid->aff_d_id & mask;
765 
766 
767 	/* If link is down then this is a hard shutdown and flush */
768 	/*
769 	 * If link not down then this is a soft shutdown and flush (e.g.
770 	 * RSCN)
771 	 */
772 	if (linkdown) {
773 		mutex_enter(&EMLXS_PORT_LOCK);
774 
775 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
776 		port->prev_did = port->did;
777 		port->did = 0;
778 
779 		if (port->ulp_statec != FC_STATE_OFFLINE) {
780 			port->ulp_statec = FC_STATE_OFFLINE;
781 			update = 1;
782 		}
783 		mutex_exit(&EMLXS_PORT_LOCK);
784 
785 		/* Tell ULP about it */
786 		if (update) {
787 			if (port->flag & EMLXS_PORT_BOUND) {
788 				if (port->vpi == 0) {
789 					EMLXS_MSGF(EMLXS_CONTEXT,
790 					    &emlxs_link_down_msg,
791 					    NULL);
792 				}
793 #ifdef SFCT_SUPPORT
794 				if (port->tgt_mode) {
795 					emlxs_fct_link_down(port);
796 
797 				} else if (port->ini_mode) {
798 					port->ulp_statec_cb(port->ulp_handle,
799 					    FC_STATE_OFFLINE);
800 				}
801 #else
802 				port->ulp_statec_cb(port->ulp_handle,
803 				    FC_STATE_OFFLINE);
804 #endif	/* SFCT_SUPPORT */
805 			} else {
806 				if (port->vpi == 0) {
807 					EMLXS_MSGF(EMLXS_CONTEXT,
808 					    &emlxs_link_down_msg,
809 					    "*");
810 				}
811 			}
812 
813 
814 		}
815 		unreg_vpi = 1;
816 
817 #ifdef DHCHAP_SUPPORT
818 		/* Stop authentication with all nodes */
819 		emlxs_dhc_auth_stop(port, NULL);
820 #endif	/* DHCHAP_SUPPORT */
821 
822 		/* Flush the base node */
823 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
824 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
825 
826 		/* Flush any pending ub buffers */
827 		emlxs_ub_flush(port);
828 	}
829 #ifdef DHCHAP_SUPPORT
830 	/* virtual link down */
831 	else if (vlinkdown) {
832 		mutex_enter(&EMLXS_PORT_LOCK);
833 
834 		if (port->ulp_statec != FC_STATE_OFFLINE) {
835 			port->ulp_statec = FC_STATE_OFFLINE;
836 			update = 1;
837 		}
838 		mutex_exit(&EMLXS_PORT_LOCK);
839 
840 		/* Tell ULP about it */
841 		if (update) {
842 			if (port->flag & EMLXS_PORT_BOUND) {
843 				if (port->vpi == 0) {
844 					EMLXS_MSGF(EMLXS_CONTEXT,
845 					    &emlxs_link_down_msg,
846 					    "Switch authentication failed.");
847 				}
848 #ifdef SFCT_SUPPORT
849 				if (port->tgt_mode) {
850 					emlxs_fct_link_down(port);
851 				} else if (port->ini_mode) {
852 					port->ulp_statec_cb(port->ulp_handle,
853 					    FC_STATE_OFFLINE);
854 				}
855 #else
856 				port->ulp_statec_cb(port->ulp_handle,
857 				    FC_STATE_OFFLINE);
858 #endif	/* SFCT_SUPPORT */
859 			} else {
860 				if (port->vpi == 0) {
861 					EMLXS_MSGF(EMLXS_CONTEXT,
862 					    &emlxs_link_down_msg,
863 					    "Switch authentication failed. *");
864 				}
865 			}
866 
867 
868 		}
869 		/* Flush the base node */
870 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
871 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
872 	}
873 #endif	/* DHCHAP_SUPPORT */
874 
875 	if (port->tgt_mode) {
876 		goto done;
877 	}
878 	/* Set the node tags */
879 	/* We will process all nodes with this tag */
880 	rw_enter(&port->node_rwlock, RW_READER);
881 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
882 		nlp = port->node_table[i];
883 		while (nlp != NULL) {
884 			nlp->nlp_tag = 1;
885 			nlp = nlp->nlp_list_next;
886 		}
887 	}
888 	rw_exit(&port->node_rwlock);
889 
890 	if (hba->flag & FC_ONLINE_MODE) {
891 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
892 	} else {
893 		adisc_support = 0;
894 	}
895 
896 	/* Check ADISC support level */
897 	switch (adisc_support) {
898 	case 0:	/* No support - Flush all IO to all matching nodes */
899 
900 		for (; ; ) {
901 			/*
902 			 * We need to hold the locks this way because
903 			 * emlxs_mb_unreg_did and the flush routines enter
904 			 * the same locks. Also, when we release the lock the
905 			 * list can change out from under us.
906 			 */
907 
908 			/* Find first node */
909 			rw_enter(&port->node_rwlock, RW_READER);
910 			action = 0;
911 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
912 				nlp = port->node_table[i];
913 				while (nlp != NULL) {
914 					if (!nlp->nlp_tag) {
915 						nlp = nlp->nlp_list_next;
916 						continue;
917 					}
918 					nlp->nlp_tag = 0;
919 
920 					/*
921 					 * Check for any device that matches
922 					 * our mask
923 					 */
924 					if ((nlp->nlp_DID & mask) == aff_d_id) {
925 						if (linkdown) {
926 							action = 1;
927 							break;
928 						} else {  /* Must be an RCSN */
929 							action = 2;
930 							break;
931 						}
932 					}
933 					nlp = nlp->nlp_list_next;
934 				}
935 
936 				if (action) {
937 					break;
938 				}
939 			}
940 			rw_exit(&port->node_rwlock);
941 
942 
943 			/* Check if nothing was found */
944 			if (action == 0) {
945 				break;
946 			} else if (action == 1) {
947 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
948 				    NULL, NULL, NULL);
949 			} else if (action == 2) {
950 #ifdef DHCHAP_SUPPORT
951 				emlxs_dhc_auth_stop(port, nlp);
952 #endif	/* DHCHAP_SUPPORT */
953 
954 				/* Close the node for any further normal IO */
955 				/* A PLOGI with reopen the node */
956 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
957 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
958 
959 				/* Flush tx queue */
960 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
961 
962 				/* Flush chip queue */
963 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
964 			}
965 		}
966 
967 		break;
968 
969 	case 1:	/* Partial support - Flush IO for non-FCP2 matching * nodes */
970 
971 		for (;;) {
972 
973 			/*
974 			 * We need to hold the locks this way because
975 			 * emlxs_mb_unreg_did and the flush routines enter
976 			 * the same locks. Also, when we release the lock the
977 			 * list can change out from under us.
978 			 */
979 			rw_enter(&port->node_rwlock, RW_READER);
980 			action = 0;
981 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
982 				nlp = port->node_table[i];
983 				while (nlp != NULL) {
984 					if (!nlp->nlp_tag) {
985 						nlp = nlp->nlp_list_next;
986 						continue;
987 					}
988 					nlp->nlp_tag = 0;
989 
990 					/*
991 					 * Check for special FCP2 target
992 					 * device that matches our mask
993 					 */
994 					if ((nlp->nlp_fcp_info &
995 					    NLP_FCP_TGT_DEVICE) &&
996 					    (nlp->nlp_fcp_info &
997 					    NLP_FCP_2_DEVICE) &&
998 					    (nlp->nlp_DID & mask) == aff_d_id) {
999 						action = 3;
1000 						break;
1001 					}
1002 					/*
1003 					 * Check for any other device that
1004 					 * matches our mask
1005 					 */
1006 					else if ((nlp->nlp_DID & mask) ==
1007 					    aff_d_id) {
1008 						if (linkdown) {
1009 							action = 1;
1010 							break;
1011 						} else {   /* Must be an RSCN */
1012 							action = 2;
1013 							break;
1014 						}
1015 					}
1016 					nlp = nlp->nlp_list_next;
1017 				}
1018 
1019 				if (action) {
1020 					break;
1021 				}
1022 			}
1023 			rw_exit(&port->node_rwlock);
1024 
1025 			/* Check if nothing was found */
1026 			if (action == 0) {
1027 				break;
1028 			} else if (action == 1) {
1029 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1030 				    NULL, NULL, NULL);
1031 			} else if (action == 2) {
1032 #ifdef DHCHAP_SUPPORT
1033 				emlxs_dhc_auth_stop(port, nlp);
1034 #endif	/* DHCHAP_SUPPORT */
1035 
1036 				/* Close the node for any further normal IO */
1037 				/* A PLOGI with reopen the node */
1038 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1039 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1040 
1041 				/* Flush tx queue */
1042 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1043 
1044 				/* Flush chip queue */
1045 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1046 			} else if (action == 3) {	/* FCP2 devices */
1047 				unreg_vpi = 0;
1048 
1049 #ifdef DHCHAP_SUPPORT
1050 				emlxs_dhc_auth_stop(port, nlp);
1051 #endif	/* DHCHAP_SUPPORT */
1052 
1053 				/* Close the node for any further normal IO */
1054 				/* An ADISC or a PLOGI with reopen the node */
1055 				emlxs_node_close(port, nlp, FC_FCP_RING,
1056 				    ((linkdown) ? 0 : 60));
1057 				emlxs_node_close(port, nlp, FC_IP_RING,
1058 				    ((linkdown) ? 0 : 60));
1059 
1060 				/* Flush tx queues except for FCP ring */
1061 				(void) emlxs_tx_node_flush(port, nlp,
1062 				    &hba->ring[FC_CT_RING], 0, 0);
1063 				(void) emlxs_tx_node_flush(port, nlp,
1064 				    &hba->ring[FC_ELS_RING], 0, 0);
1065 				(void) emlxs_tx_node_flush(port, nlp,
1066 				    &hba->ring[FC_IP_RING], 0, 0);
1067 
1068 				/* Clear IP XRI */
1069 				nlp->nlp_Xri = 0;
1070 
1071 				/* Flush chip queues except for FCP ring */
1072 				(void) emlxs_chipq_node_flush(port,
1073 				    &hba->ring[FC_CT_RING], nlp, 0);
1074 				(void) emlxs_chipq_node_flush(port,
1075 				    &hba->ring[FC_ELS_RING], nlp, 0);
1076 				(void) emlxs_chipq_node_flush(port,
1077 				    &hba->ring[FC_IP_RING], nlp, 0);
1078 			}
1079 		}
1080 		break;
1081 
1082 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1083 
1084 		if (!linkdown && !vlinkdown) {
1085 			break;
1086 		}
1087 		for (;;) {
1088 			/*
1089 			 * We need to hold the locks this way because
1090 			 * emlxs_mb_unreg_did and the flush routines enter
1091 			 * the same locks. Also, when we release the lock the
1092 			 * list can change out from under us.
1093 			 */
1094 			rw_enter(&port->node_rwlock, RW_READER);
1095 			action = 0;
1096 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1097 				nlp = port->node_table[i];
1098 				while (nlp != NULL) {
1099 					if (!nlp->nlp_tag) {
1100 						nlp = nlp->nlp_list_next;
1101 						continue;
1102 					}
1103 					nlp->nlp_tag = 0;
1104 
1105 					/*
1106 					 * Check for FCP target device that
1107 					 * matches our mask
1108 					 */
1109 					if ((nlp->nlp_fcp_info &
1110 					    NLP_FCP_TGT_DEVICE) &&
1111 					    (nlp->nlp_DID & mask) == aff_d_id) {
1112 						action = 3;
1113 						break;
1114 					}
1115 					/*
1116 					 * Check for any other device that
1117 					 * matches our mask
1118 					 */
1119 					else if ((nlp->nlp_DID & mask) ==
1120 					    aff_d_id) {
1121 						if (linkdown) {
1122 							action = 1;
1123 							break;
1124 						} else { /* Must be an RSCN */
1125 							action = 2;
1126 							break;
1127 						}
1128 					}
1129 					nlp = nlp->nlp_list_next;
1130 				}
1131 				if (action) {
1132 					break;
1133 				}
1134 			}
1135 			rw_exit(&port->node_rwlock);
1136 
1137 			/* Check if nothing was found */
1138 			if (action == 0) {
1139 				break;
1140 			} else if (action == 1) {
1141 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1142 				    NULL, NULL, NULL);
1143 			} else if (action == 2) {
1144 				/* Close the node for any further normal IO */
1145 				/* A PLOGI with reopen the node */
1146 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1147 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1148 
1149 				/* Flush tx queue */
1150 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1151 
1152 				/* Flush chip queue */
1153 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1154 
1155 			} else if (action == 3) {	/* FCP2 devices */
1156 				unreg_vpi = 0;
1157 
1158 				/* Close the node for any further normal IO */
1159 				/* An ADISC or a PLOGI with reopen the node */
1160 				emlxs_node_close(port, nlp, FC_FCP_RING,
1161 				    ((linkdown) ? 0 : 60));
1162 				emlxs_node_close(port, nlp, FC_IP_RING,
1163 				    ((linkdown) ? 0 : 60));
1164 
1165 				/* Flush tx queues except for FCP ring */
1166 				(void) emlxs_tx_node_flush(port, nlp,
1167 				    &hba->ring[FC_CT_RING], 0, 0);
1168 				(void) emlxs_tx_node_flush(port, nlp,
1169 				    &hba->ring[FC_ELS_RING], 0, 0);
1170 				(void) emlxs_tx_node_flush(port, nlp,
1171 				    &hba->ring[FC_IP_RING], 0, 0);
1172 
1173 				/* Clear IP XRI */
1174 				nlp->nlp_Xri = 0;
1175 
1176 				/* Flush chip queues except for FCP ring */
1177 				(void) emlxs_chipq_node_flush(port,
1178 				    &hba->ring[FC_CT_RING], nlp, 0);
1179 				(void) emlxs_chipq_node_flush(port,
1180 				    &hba->ring[FC_ELS_RING], nlp, 0);
1181 				(void) emlxs_chipq_node_flush(port,
1182 				    &hba->ring[FC_IP_RING], nlp, 0);
1183 			}
1184 		}
1185 
1186 		break;
1187 
1188 
1189 	}	/* switch() */
1190 
1191 done:
1192 
1193 	if (unreg_vpi) {
1194 		(void) emlxs_mb_unreg_vpi(port);
1195 	}
1196 	return (0);
1197 
1198 } /* emlxs_port_offline() */
1199 
1200 
1201 
1202 extern void
1203 emlxs_port_online(emlxs_port_t *vport)
1204 {
1205 	emlxs_hba_t *hba = vport->hba;
1206 	emlxs_port_t *port = &PPORT;
1207 	uint32_t state;
1208 	uint32_t update;
1209 	uint32_t npiv_linkup;
1210 	char topology[32];
1211 	char linkspeed[32];
1212 	char mode[32];
1213 
1214 	/*
1215 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "linkup_callback.
1216 	 * vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1217 	 */
1218 
1219 	if ((vport->vpi > 0) &&
1220 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1221 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1222 		return;
1223 	}
1224 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1225 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1226 		return;
1227 	}
1228 	mutex_enter(&EMLXS_PORT_LOCK);
1229 
1230 	/* Check for mode */
1231 	if (port->tgt_mode) {
1232 		(void) strcpy(mode, ", target");
1233 	} else if (port->ini_mode) {
1234 		(void) strcpy(mode, ", initiator");
1235 	} else {
1236 		(void) strcpy(mode, "");
1237 	}
1238 
1239 	/* Check for loop topology */
1240 	if (hba->topology == TOPOLOGY_LOOP) {
1241 		state = FC_STATE_LOOP;
1242 		(void) strcpy(topology, ", loop");
1243 	} else {
1244 		state = FC_STATE_ONLINE;
1245 		(void) strcpy(topology, ", fabric");
1246 	}
1247 
1248 	/* Set the link speed */
1249 	switch (hba->linkspeed) {
1250 	case 0:
1251 		(void) strcpy(linkspeed, "Gb");
1252 		state |= FC_STATE_1GBIT_SPEED;
1253 		break;
1254 
1255 	case LA_1GHZ_LINK:
1256 		(void) strcpy(linkspeed, "1Gb");
1257 		state |= FC_STATE_1GBIT_SPEED;
1258 		break;
1259 	case LA_2GHZ_LINK:
1260 		(void) strcpy(linkspeed, "2Gb");
1261 		state |= FC_STATE_2GBIT_SPEED;
1262 		break;
1263 	case LA_4GHZ_LINK:
1264 		(void) strcpy(linkspeed, "4Gb");
1265 		state |= FC_STATE_4GBIT_SPEED;
1266 		break;
1267 	case LA_8GHZ_LINK:
1268 		(void) strcpy(linkspeed, "8Gb");
1269 		state |= FC_STATE_8GBIT_SPEED;
1270 		break;
1271 	case LA_10GHZ_LINK:
1272 		(void) strcpy(linkspeed, "10Gb");
1273 		state |= FC_STATE_10GBIT_SPEED;
1274 		break;
1275 	default:
1276 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1277 		break;
1278 	}
1279 
1280 	npiv_linkup = 0;
1281 	update = 0;
1282 
1283 	if ((hba->state >= FC_LINK_UP) &&
1284 	    !(hba->flag & FC_LOOPBACK_MODE) &&
1285 	    (vport->ulp_statec != state)) {
1286 		update = 1;
1287 		vport->ulp_statec = state;
1288 
1289 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1290 			hba->flag |= FC_NPIV_LINKUP;
1291 			npiv_linkup = 1;
1292 		}
1293 	}
1294 	mutex_exit(&EMLXS_PORT_LOCK);
1295 
1296 	/*
1297 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "linkup_callback:
1298 	 * update=%d vpi=%d flag=%d fc_flag=%x state=%x statec=%x", update,
1299 	 * vport->vpi, npiv_linkup, hba->flag, hba->state,
1300 	 * vport->ulp_statec);
1301 	 */
1302 	if (update) {
1303 		if (vport->flag & EMLXS_PORT_BOUND) {
1304 			if (vport->vpi == 0) {
1305 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1306 				    "%s%s%s",
1307 				    linkspeed, topology, mode);
1308 			} else if (npiv_linkup) {
1309 				EMLXS_MSGF(EMLXS_CONTEXT,
1310 				    &emlxs_npiv_link_up_msg,
1311 				    "%s%s%s",
1312 				    linkspeed, topology, mode);
1313 			}
1314 #ifdef SFCT_SUPPORT
1315 			if (vport->tgt_mode) {
1316 				emlxs_fct_link_up(vport);
1317 			} else if (vport->ini_mode) {
1318 				vport->ulp_statec_cb(vport->ulp_handle, state);
1319 			}
1320 #else
1321 			vport->ulp_statec_cb(vport->ulp_handle, state);
1322 #endif	/* SFCT_SUPPORT */
1323 		} else {
1324 			if (vport->vpi == 0) {
1325 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1326 				    "%s%s%s *",
1327 				    linkspeed, topology, mode);
1328 			} else if (npiv_linkup) {
1329 				EMLXS_MSGF(EMLXS_CONTEXT,
1330 				    &emlxs_npiv_link_up_msg,
1331 				    "%s%s%s *",
1332 				    linkspeed, topology, mode);
1333 			}
1334 		}
1335 
1336 		/* Check for waiting threads */
1337 		if (vport->vpi == 0) {
1338 			mutex_enter(&EMLXS_LINKUP_LOCK);
1339 			if (hba->linkup_wait_flag == TRUE) {
1340 				hba->linkup_wait_flag = FALSE;
1341 				cv_broadcast(&EMLXS_LINKUP_CV);
1342 			}
1343 			mutex_exit(&EMLXS_LINKUP_LOCK);
1344 		}
1345 		/* Flush any pending ub buffers */
1346 		emlxs_ub_flush(vport);
1347 	}
1348 	return;
1349 
1350 } /* emlxs_port_online() */
1351 
1352 
1353 extern void
1354 emlxs_linkdown(emlxs_hba_t *hba)
1355 {
1356 	emlxs_port_t *port = &PPORT;
1357 	int i;
1358 
1359 	mutex_enter(&EMLXS_PORT_LOCK);
1360 
1361 	HBASTATS.LinkDown++;
1362 	emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);
1363 
1364 	/* Filter hba flags */
1365 	hba->flag &= FC_LINKDOWN_MASK;
1366 	hba->discovery_timer = 0;
1367 	hba->linkup_timer = 0;
1368 
1369 	mutex_exit(&EMLXS_PORT_LOCK);
1370 
1371 	for (i = 0; i < MAX_VPORTS; i++) {
1372 		port = &VPORT(i);
1373 
1374 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1375 			continue;
1376 		}
1377 		(void) emlxs_port_offline(port, 0xffffffff);
1378 
1379 	}
1380 
1381 	return;
1382 
1383 } /* emlxs_linkdown() */
1384 
1385 
1386 extern void
1387 emlxs_linkup(emlxs_hba_t *hba)
1388 {
1389 	emlxs_port_t *port = &PPORT;
1390 	emlxs_config_t *cfg = &CFG;
1391 
1392 	mutex_enter(&EMLXS_PORT_LOCK);
1393 
1394 	HBASTATS.LinkUp++;
1395 	emlxs_ffstate_change_locked(hba, FC_LINK_UP);
1396 
1397 #ifdef MENLO_TEST
1398 	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
1399 	    (cfg[CFG_HORNET_FLOGI].current == 0)) {
1400 		hba->flag |= FC_MENLO_MODE;
1401 	}
1402 #endif	/* MENLO_TEST */
1403 
1404 #ifdef MENLO_SUPPORT
1405 	if (hba->flag & FC_MENLO_MODE) {
1406 		mutex_exit(&EMLXS_PORT_LOCK);
1407 
1408 		/*
1409 		 * Trigger linkup CV and don't start linkup & discovery
1410 		 * timers
1411 		 */
1412 		mutex_enter(&EMLXS_LINKUP_LOCK);
1413 		cv_broadcast(&EMLXS_LINKUP_CV);
1414 		mutex_exit(&EMLXS_LINKUP_LOCK);
1415 
1416 		return;
1417 	}
1418 #endif	/* MENLO_SUPPORT */
1419 
1420 	/* Set the linkup & discovery timers */
1421 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1422 	hba->discovery_timer = hba->timer_tics +
1423 	    cfg[CFG_LINKUP_TIMEOUT].current + cfg[CFG_DISC_TIMEOUT].current;
1424 
1425 	mutex_exit(&EMLXS_PORT_LOCK);
1426 
1427 	return;
1428 
1429 } /* emlxs_linkup() */
1430 
1431 
1432 /*
1433  *  emlxs_reset_link
1434  *
1435  *  Description:
1436  *  Called to reset the link with an init_link
1437  *
1438  *    Returns:
1439  *
1440  */
1441 extern int
1442 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
1443 {
1444 	emlxs_port_t *port = &PPORT;
1445 	emlxs_config_t *cfg;
1446 	MAILBOX *mb;
1447 
1448 	/*
1449 	 * Get a buffer to use for the mailbox command
1450 	 */
1451 	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
1452 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1453 		    "Unable to allocate mailbox buffer.");
1454 
1455 		return (1);
1456 	}
1457 	cfg = &CFG;
1458 
1459 	if (linkup) {
1460 		/*
1461 		 * Setup and issue mailbox INITIALIZE LINK command
1462 		 */
1463 
1464 		emlxs_mb_init_link(hba, (MAILBOX *)mb,
1465 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1466 
1467 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1468 
1469 		/* Clear the loopback mode */
1470 		mutex_enter(&EMLXS_PORT_LOCK);
1471 		hba->flag &= ~FC_LOOPBACK_MODE;
1472 		mutex_exit(&EMLXS_PORT_LOCK);
1473 
1474 		if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mb,
1475 		    MBX_NOWAIT, 0) != MBX_BUSY) {
1476 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1477 		}
1478 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1479 
1480 	} else {	/* hold link down */
1481 		emlxs_mb_down_link(hba, (MAILBOX *)mb);
1482 
1483 		if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mb,
1484 		    MBX_NOWAIT, 0) != MBX_BUSY) {
1485 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1486 		}
1487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1488 		    "Disabling link...");
1489 	}
1490 
1491 	return (0);
1492 
1493 } /* emlxs_reset_link() */
1494 
1495 
1496 extern int
1497 emlxs_online(emlxs_hba_t *hba)
1498 {
1499 	emlxs_port_t *port = &PPORT;
1500 	int32_t rval = 0;
1501 	uint32_t i = 0;
1502 
1503 	/* Make sure adapter is offline or exit trying (30 seconds) */
1504 	for (; ; ) {
1505 		/* Check if adapter is already going online */
1506 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1507 			return (0);
1508 		}
1509 		mutex_enter(&EMLXS_PORT_LOCK);
1510 
1511 		/* Check again */
1512 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1513 			mutex_exit(&EMLXS_PORT_LOCK);
1514 			return (0);
1515 		}
1516 		/* Check if adapter is offline */
1517 		if (hba->flag & FC_OFFLINE_MODE) {
1518 			/* Mark it going online */
1519 			hba->flag &= ~FC_OFFLINE_MODE;
1520 			hba->flag |= FC_ONLINING_MODE;
1521 
1522 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1523 			mutex_exit(&EMLXS_PORT_LOCK);
1524 			break;
1525 		}
1526 		mutex_exit(&EMLXS_PORT_LOCK);
1527 
1528 		if (i++ > 30) {
1529 			/* Return on timeout */
1530 			return (1);
1531 		}
1532 		DELAYMS(1000);
1533 	}
1534 
1535 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1536 	    "Going online...");
1537 
1538 	if (hba->bus_type == SBUS_FC) {
1539 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba,
1540 		    hba->sbus_csr_addr));
1541 	}
1542 	if (rval = emlxs_ffinit(hba)) {
1543 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1544 		    "status=%x",
1545 		    rval);
1546 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1547 
1548 		/* Set FC_OFFLINE_MODE */
1549 		mutex_enter(&EMLXS_PORT_LOCK);
1550 		emlxs_diag_state = DDI_OFFDI;
1551 		hba->flag |= FC_OFFLINE_MODE;
1552 		hba->flag &= ~FC_ONLINING_MODE;
1553 		mutex_exit(&EMLXS_PORT_LOCK);
1554 
1555 		return (rval);
1556 	}
1557 	/* Start the timer */
1558 	emlxs_timer_start(hba);
1559 
1560 	/* Set FC_ONLINE_MODE */
1561 	mutex_enter(&EMLXS_PORT_LOCK);
1562 	emlxs_diag_state = DDI_ONDI;
1563 	hba->flag |= FC_ONLINE_MODE;
1564 	hba->flag &= ~FC_ONLINING_MODE;
1565 	mutex_exit(&EMLXS_PORT_LOCK);
1566 
1567 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1568 
1569 #ifdef SFCT_SUPPORT
1570 	(void) emlxs_fct_port_initialize(port);
1571 #endif	/* SFCT_SUPPORT */
1572 
1573 	return (rval);
1574 
1575 } /* emlxs_online() */
1576 
1577 
1578 extern int
1579 emlxs_offline(emlxs_hba_t *hba)
1580 {
1581 	emlxs_port_t *port = &PPORT;
1582 	uint32_t i = 0;
1583 	int rval = 1;
1584 
1585 	/* Make sure adapter is online or exit trying (30 seconds) */
1586 	for (; ; ) {
1587 		/* Check if adapter is already going offline */
1588 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1589 			return (0);
1590 		}
1591 		mutex_enter(&EMLXS_PORT_LOCK);
1592 
1593 		/* Check again */
1594 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1595 			mutex_exit(&EMLXS_PORT_LOCK);
1596 			return (0);
1597 		}
1598 		/* Check if adapter is online */
1599 		if (hba->flag & FC_ONLINE_MODE) {
1600 			/* Mark it going offline */
1601 			hba->flag &= ~FC_ONLINE_MODE;
1602 			hba->flag |= FC_OFFLINING_MODE;
1603 
1604 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1605 			mutex_exit(&EMLXS_PORT_LOCK);
1606 			break;
1607 		}
1608 		mutex_exit(&EMLXS_PORT_LOCK);
1609 
1610 		if (i++ > 30) {
1611 			/* Return on timeout */
1612 			return (1);
1613 		}
1614 		DELAYMS(1000);
1615 	}
1616 
1617 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Going offline...");
1618 
1619 	if (port->ini_mode) {
1620 		/* Flush all IO */
1621 		emlxs_linkdown(hba);
1622 
1623 	}
1624 #ifdef SFCT_SUPPORT
1625 	else {
1626 		(void) emlxs_fct_port_shutdown(port);
1627 	}
1628 #endif	/* SFCT_SUPPORT */
1629 
1630 	/* Check if adapter was shutdown */
1631 	if (hba->flag & FC_HARDWARE_ERROR) {
1632 		/* Force mailbox cleanup */
1633 		/* This will wake any sleeping or polling threads */
1634 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1635 	}
1636 	/* Pause here for the IO to settle */
1637 	delay(drv_usectohz(1000000));	/* 1 sec */
1638 
1639 	/* Unregister all nodes */
1640 	emlxs_ffcleanup(hba);
1641 
1642 
1643 	if (hba->bus_type == SBUS_FC) {
1644 		WRITE_SBUS_CSR_REG(hba,
1645 		    FC_SHS_REG(hba, hba->sbus_csr_addr), 0x9A);
1646 	}
1647 	/* Stop the timer */
1648 	emlxs_timer_stop(hba);
1649 
1650 	/* For safety flush every iotag list */
1651 	if (emlxs_iotag_flush(hba)) {
1652 		/* Pause here for the IO to flush */
1653 		delay(drv_usectohz(1000));
1654 	}
1655 
1656 	/* Wait for poll command request to settle */
1657 	while (hba->io_poll_count > 0) {
1658 		delay(drv_usectohz(2000000));   /* 2 sec */
1659 	}
1660 
1661 	/* Interlock the adapter to take it down */
1662 	(void) emlxs_interlock(hba);
1663 
1664 	/* Free all the shared memory */
1665 	(void) emlxs_mem_free_buffer(hba);
1666 
1667 	mutex_enter(&EMLXS_PORT_LOCK);
1668 	hba->flag |= FC_OFFLINE_MODE;
1669 	hba->flag &= ~FC_OFFLINING_MODE;
1670 	emlxs_diag_state = DDI_OFFDI;
1671 	mutex_exit(&EMLXS_PORT_LOCK);
1672 
1673 	rval = 0;
1674 
1675 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1676 
1677 done:
1678 
1679 	return (rval);
1680 
1681 } /* emlxs_offline() */
1682 
1683 
1684 
1685 extern int
1686 emlxs_power_down(emlxs_hba_t *hba)
1687 {
1688 	int32_t rval = 0;
1689 	uint32_t *ptr;
1690 	uint32_t i;
1691 
1692 	if ((rval = emlxs_offline(hba))) {
1693 		return (rval);
1694 	}
1695 	/* Save pci config space */
1696 	ptr = (uint32_t *)hba->pm_config;
1697 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1698 		*ptr = ddi_get32(hba->pci_acc_handle,
1699 		    (uint32_t *)(hba->pci_addr + i));
1700 	}
1701 
1702 	/* Put chip in D3 state */
1703 	(void) ddi_put8(hba->pci_acc_handle,
1704 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1705 	    (uint8_t)PCI_PM_D3_STATE);
1706 
1707 	return (0);
1708 
1709 } /* End emlxs_power_down */
1710 
1711 
1712 extern int
1713 emlxs_power_up(emlxs_hba_t *hba)
1714 {
1715 	int32_t rval = 0;
1716 	uint32_t *ptr;
1717 	uint32_t i;
1718 
1719 
1720 	/* Take chip out of D3 state */
1721 	(void) ddi_put8(hba->pci_acc_handle,
1722 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1723 	    (uint8_t)PCI_PM_D0_STATE);
1724 
1725 	/* Must have at least 10 ms delay here */
1726 	DELAYMS(100);
1727 
1728 	/* Restore pci config space */
1729 	ptr = (uint32_t *)hba->pm_config;
1730 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1731 		(void) ddi_put32(hba->pci_acc_handle,
1732 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1733 	}
1734 
1735 	/* Bring adapter online */
1736 	if ((rval = emlxs_online(hba))) {
1737 		(void) ddi_put8(hba->pci_acc_handle,
1738 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1739 		    (uint8_t)PCI_PM_D3_STATE);
1740 
1741 		return (rval);
1742 	}
1743 	return (rval);
1744 
1745 } /* End emlxs_power_up */
1746 
1747 
1748 /*
1749  * NAME:     emlxs_ffcleanup
1750  *
1751  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1752  *
1753  * EXECUTION ENVIRONMENT: process only
1754  *
1755  * CALLED FROM: CFG_TERM
1756  *
1757  * INPUT: hba       - pointer to the dev_ctl area.
1758  *
1759  * RETURNS: none
1760  */
1761 extern void
1762 emlxs_ffcleanup(emlxs_hba_t *hba)
1763 {
1764 	emlxs_port_t *port = &PPORT;
1765 	uint32_t j;
1766 
1767 	/* Disable all but the mailbox interrupt */
1768 	hba->hc_copy = HC_MBINT_ENA;
1769 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1770 
1771 	/* Make sure all port nodes are destroyed */
1772 	for (j = 0; j < MAX_VPORTS; j++) {
1773 		port = &VPORT(j);
1774 
1775 		if (port->node_count) {
1776 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1777 		}
1778 	}
1779 
1780 	/* Clear all interrupt enable conditions */
1781 	hba->hc_copy = 0;
1782 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1783 
1784 	return;
1785 
1786 } /* emlxs_ffcleanup() */
1787 
1788 
1789 extern uint16_t
1790 emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp)
1791 {
1792 	emlxs_hba_t *hba;
1793 	emlxs_port_t *port;
1794 	uint16_t iotag;
1795 	uint32_t i;
1796 
1797 	hba = rp->hba;
1798 
1799 	mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno));
1800 
1801 	if (sbp->iotag != 0) {
1802 		port = &PPORT;
1803 
1804 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1805 		    "Pkt already registered! ringo=%d iotag=%d sbp=%p",
1806 		    sbp->ring, sbp->iotag, sbp);
1807 	}
1808 	iotag = 0;
1809 	for (i = 0; i < rp->max_iotag; i++) {
1810 		if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) {
1811 			rp->fc_iotag = 1;
1812 		}
1813 		iotag = rp->fc_iotag++;
1814 
1815 		if (rp->fc_table[iotag] == 0 ||
1816 		    rp->fc_table[iotag] == STALE_PACKET) {
1817 			hba->io_count[rp->ringno]++;
1818 			rp->fc_table[iotag] = sbp;
1819 
1820 			sbp->iotag = iotag;
1821 			sbp->ring = rp;
1822 
1823 			break;
1824 		}
1825 		iotag = 0;
1826 	}
1827 
1828 	mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
1829 
1830 	/*
1831 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1832 	 * "emlxs_register_pkt: ringo=%d iotag=%d sbp=%p", rp->ringno, iotag,
1833 	 * sbp);
1834 	 */
1835 
1836 	return (iotag);
1837 
1838 } /* emlxs_register_pkt() */
1839 
1840 
1841 
1842 extern emlxs_buf_t *
1843 emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced)
1844 {
1845 	emlxs_hba_t *hba;
1846 	emlxs_buf_t *sbp;
1847 	uint32_t ringno;
1848 
1849 	/* Check the iotag range */
1850 	if ((iotag == 0) || (iotag >= rp->max_iotag)) {
1851 		return (NULL);
1852 	}
1853 	sbp = NULL;
1854 	hba = rp->hba;
1855 	ringno = rp->ringno;
1856 
1857 	/* Remove the sbp from the table */
1858 	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
1859 	sbp = rp->fc_table[iotag];
1860 
1861 	if (!sbp || (sbp == STALE_PACKET)) {
1862 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1863 		return (sbp);
1864 	}
1865 	rp->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1866 	hba->io_count[ringno]--;
1867 	sbp->iotag = 0;
1868 
1869 	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
1870 
1871 
1872 	/* Clean up the sbp */
1873 	mutex_enter(&sbp->mtx);
1874 
1875 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
1876 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
1877 		hba->ring_tx_count[ringno]--;
1878 	}
1879 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
1880 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
1881 	}
1882 	if (sbp->bmp) {
1883 		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
1884 		sbp->bmp = 0;
1885 	}
1886 	mutex_exit(&sbp->mtx);
1887 
1888 
1889 	/*
1890 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1891 	 * "emlxs_unregister_pkt: ringo=%d iotag=%d sbp=%p", rp->ringno,
1892 	 * iotag, sbp);
1893 	 */
1894 
1895 	return (sbp);
1896 
1897 } /* emlxs_unregister_pkt() */
1898 
1899 
1900 
1901 /* Flush all IO's to all nodes for a given ring */
1902 extern uint32_t
1903 emlxs_tx_ring_flush(emlxs_hba_t *hba, RING *rp, emlxs_buf_t *fpkt)
1904 {
1905 	emlxs_port_t *port = &PPORT;
1906 	emlxs_buf_t *sbp;
1907 	IOCBQ *iocbq;
1908 	IOCBQ *next;
1909 	IOCB *iocb;
1910 	uint32_t ringno;
1911 	Q abort;
1912 	NODELIST *ndlp;
1913 	IOCB *icmd;
1914 	MATCHMAP *mp;
1915 	uint32_t i;
1916 
1917 	ringno = rp->ringno;
1918 	bzero((void *)&abort, sizeof (Q));
1919 
1920 	mutex_enter(&EMLXS_RINGTX_LOCK);
1921 
1922 	/* While a node needs servicing */
1923 	while (rp->nodeq.q_first) {
1924 		ndlp = (NODELIST *)rp->nodeq.q_first;
1925 
1926 		/* Check if priority queue is not empty */
1927 		if (ndlp->nlp_ptx[ringno].q_first) {
1928 			/* Transfer all iocb's to local queue */
1929 			if (abort.q_first == 0) {
1930 				abort.q_first = ndlp->nlp_ptx[ringno].q_first;
1931 				abort.q_last = ndlp->nlp_ptx[ringno].q_last;
1932 			} else {
1933 				((IOCBQ *)abort.q_last)->next =
1934 				    (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
1935 			}
1936 
1937 			abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
1938 		}
1939 		/* Check if tx queue is not empty */
1940 		if (ndlp->nlp_tx[ringno].q_first) {
1941 			/* Transfer all iocb's to local queue */
1942 			if (abort.q_first == 0) {
1943 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
1944 				abort.q_last = ndlp->nlp_tx[ringno].q_last;
1945 			} else {
1946 				((IOCBQ *)abort.q_last)->next =
1947 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
1948 			}
1949 
1950 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
1951 
1952 		}
1953 		/* Clear the queue pointers */
1954 		ndlp->nlp_ptx[ringno].q_first = NULL;
1955 		ndlp->nlp_ptx[ringno].q_last = NULL;
1956 		ndlp->nlp_ptx[ringno].q_cnt = 0;
1957 
1958 		ndlp->nlp_tx[ringno].q_first = NULL;
1959 		ndlp->nlp_tx[ringno].q_last = NULL;
1960 		ndlp->nlp_tx[ringno].q_cnt = 0;
1961 
1962 		/* Remove node from service queue */
1963 
1964 		/* If this is the last node on list */
1965 		if (rp->nodeq.q_last == (void *)ndlp) {
1966 			rp->nodeq.q_last = NULL;
1967 			rp->nodeq.q_first = NULL;
1968 			rp->nodeq.q_cnt = 0;
1969 		} else {
1970 			/* Remove node from head */
1971 			rp->nodeq.q_first = ndlp->nlp_next[ringno];
1972 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
1973 			    rp->nodeq.q_first;
1974 			rp->nodeq.q_cnt--;
1975 		}
1976 
1977 		/* Clear node */
1978 		ndlp->nlp_next[ringno] = NULL;
1979 	}
1980 
1981 	/* First cleanup the iocb's while still holding the lock */
1982 	iocbq = (IOCBQ *)abort.q_first;
1983 	while (iocbq) {
1984 		/* Free the IoTag and the bmp */
1985 		iocb = &iocbq->iocb;
1986 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
1987 
1988 		if (sbp && (sbp != STALE_PACKET)) {
1989 			mutex_enter(&sbp->mtx);
1990 
1991 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
1992 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
1993 				hba->ring_tx_count[ringno]--;
1994 			}
1995 			sbp->pkt_flags |= PACKET_IN_FLUSH;
1996 
1997 			/*
1998 			 * If the fpkt is already set, then we will leave it
1999 			 * alone
2000 			 */
2001 			/*
2002 			 * This ensures that this pkt is only accounted for
2003 			 * on one fpkt->flush_count
2004 			 */
2005 			if (!sbp->fpkt && fpkt) {
2006 				mutex_enter(&fpkt->mtx);
2007 				sbp->fpkt = fpkt;
2008 				fpkt->flush_count++;
2009 				mutex_exit(&fpkt->mtx);
2010 			}
2011 			mutex_exit(&sbp->mtx);
2012 		}
2013 		iocbq = (IOCBQ *)iocbq->next;
2014 
2015 	}	/* end of while */
2016 
2017 	mutex_exit(&EMLXS_RINGTX_LOCK);
2018 
2019 	/* Now abort the iocb's */
2020 	iocbq = (IOCBQ *)abort.q_first;
2021 	while (iocbq) {
2022 		/* Save the next iocbq for now */
2023 		next = (IOCBQ *)iocbq->next;
2024 
2025 		/* Unlink this iocbq */
2026 		iocbq->next = NULL;
2027 
2028 		/* Get the pkt */
2029 		sbp = (emlxs_buf_t *)iocbq->sbp;
2030 
2031 		if (sbp) {
2032 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2033 			    "tx: sbp=%p node=%p",
2034 			    sbp, sbp->node);
2035 
2036 			if (hba->state >= FC_LINK_UP) {
2037 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2038 				    IOERR_ABORT_REQUESTED, 1);
2039 			} else {
2040 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2041 				    IOERR_LINK_DOWN, 1);
2042 			}
2043 
2044 		}
2045 		/* Free the iocb and its associated buffers */
2046 		else {
2047 			icmd = &iocbq->iocb;
2048 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2049 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2050 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2051 				if ((hba->flag &
2052 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2053 					/* HBA is detaching or offlining */
2054 					if (icmd->ulpCommand !=
2055 					    CMD_QUE_RING_LIST64_CN) {
2056 						uint8_t *tmp;
2057 
2058 						for (i = 0;
2059 						    i < icmd->ulpBdeCount;
2060 						    i++) {
2061 
2062 							mp = EMLXS_GET_VADDR(
2063 							    hba, rp, icmd);
2064 
2065 							tmp = (uint8_t *)mp;
2066 							if (mp) {
2067 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2068 							}
2069 						}
2070 					}
2071 					(void) emlxs_mem_put(hba, MEM_IOCB,
2072 					    (uint8_t *)iocbq);
2073 				} else {
2074 					/* repost the unsolicited buffer */
2075 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2076 				}
2077 			}
2078 		}
2079 
2080 		iocbq = next;
2081 
2082 	}	/* end of while */
2083 
2084 	return (abort.q_cnt);
2085 
2086 } /* emlxs_tx_ring_flush() */
2087 
2088 
2089 /* Flush all IO's on all or a given ring for a given node */
2090 extern uint32_t
2091 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, RING *ring,
2092     uint32_t shutdown, emlxs_buf_t *fpkt)
2093 {
2094 	emlxs_hba_t *hba = HBA;
2095 	emlxs_buf_t *sbp;
2096 	uint32_t ringno;
2097 	RING *rp;
2098 	IOCB *icmd;
2099 	IOCBQ *iocbq;
2100 	NODELIST *prev;
2101 	IOCBQ *next;
2102 	IOCB *iocb;
2103 	Q abort;
2104 	uint32_t i;
2105 	MATCHMAP *mp;
2106 
2107 
2108 	bzero((void *)&abort, sizeof (Q));
2109 
2110 	/* Flush all I/O's on tx queue to this target */
2111 	mutex_enter(&EMLXS_RINGTX_LOCK);
2112 
2113 	if (!ndlp->nlp_base && shutdown) {
2114 		ndlp->nlp_active = 0;
2115 	}
2116 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2117 		rp = &hba->ring[ringno];
2118 
2119 		if (ring && rp != ring) {
2120 			continue;
2121 		}
2122 		if (!ndlp->nlp_base || shutdown) {
2123 			/* Check if priority queue is not empty */
2124 			if (ndlp->nlp_ptx[ringno].q_first) {
2125 				/* Transfer all iocb's to local queue */
2126 				if (abort.q_first == 0) {
2127 					abort.q_first =
2128 					    ndlp->nlp_ptx[ringno].q_first;
2129 					abort.q_last =
2130 					    ndlp->nlp_ptx[ringno].q_last;
2131 				} else {
2132 					emlxs_queue_t *q;
2133 
2134 					q = &ndlp->nlp_ptx[ringno];
2135 					((IOCBQ *)abort.q_last)->next =
2136 					    (IOCBQ *)q->q_first;
2137 					/*
2138 					 * ((IOCBQ *)abort.q_last)->next =
2139 					 * (IOCBQ *)
2140 					 * ndlp->nlp_ptx[ringno].q_first;
2141 					 */
2142 				}
2143 
2144 				abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2145 			}
2146 		}
2147 		/* Check if tx queue is not empty */
2148 		if (ndlp->nlp_tx[ringno].q_first) {
2149 			/* Transfer all iocb's to local queue */
2150 			if (abort.q_first == 0) {
2151 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2152 				abort.q_last = ndlp->nlp_tx[ringno].q_last;
2153 			} else {
2154 				((IOCBQ *)abort.q_last)->next =
2155 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2156 			}
2157 
2158 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2159 		}
2160 		/* Clear the queue pointers */
2161 		ndlp->nlp_ptx[ringno].q_first = NULL;
2162 		ndlp->nlp_ptx[ringno].q_last = NULL;
2163 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2164 
2165 		ndlp->nlp_tx[ringno].q_first = NULL;
2166 		ndlp->nlp_tx[ringno].q_last = NULL;
2167 		ndlp->nlp_tx[ringno].q_cnt = 0;
2168 
2169 		/* If this node was on the ring queue, remove it */
2170 		if (ndlp->nlp_next[ringno]) {
2171 			/* If this is the only node on list */
2172 			if (rp->nodeq.q_first == (void *)ndlp &&
2173 			    rp->nodeq.q_last == (void *)ndlp) {
2174 				rp->nodeq.q_last = NULL;
2175 				rp->nodeq.q_first = NULL;
2176 				rp->nodeq.q_cnt = 0;
2177 			} else if (rp->nodeq.q_first == (void *)ndlp) {
2178 				NODELIST *nd;
2179 
2180 				rp->nodeq.q_first = ndlp->nlp_next[ringno];
2181 				nd = (NODELIST *)rp->nodeq.q_last;
2182 				nd->nlp_next[ringno] = rp->nodeq.q_first;
2183 				rp->nodeq.q_cnt--;
2184 			} else {	/* This is a little more difficult */
2185 				/*
2186 				 * Find the previous node in the circular
2187 				 * ring queue
2188 				 */
2189 				prev = ndlp;
2190 				while (prev->nlp_next[ringno] != ndlp) {
2191 					prev = prev->nlp_next[ringno];
2192 				}
2193 
2194 				prev->nlp_next[ringno] = ndlp->nlp_next[ringno];
2195 
2196 				if (rp->nodeq.q_last == (void *)ndlp) {
2197 					rp->nodeq.q_last = (void *)prev;
2198 				}
2199 				rp->nodeq.q_cnt--;
2200 
2201 			}
2202 
2203 			/* Clear node */
2204 			ndlp->nlp_next[ringno] = NULL;
2205 		}
2206 	}
2207 
2208 	/* First cleanup the iocb's while still holding the lock */
2209 	iocbq = (IOCBQ *)abort.q_first;
2210 	while (iocbq) {
2211 		/* Free the IoTag and the bmp */
2212 		iocb = &iocbq->iocb;
2213 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2214 
2215 		if (sbp && (sbp != STALE_PACKET)) {
2216 			mutex_enter(&sbp->mtx);
2217 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2218 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2219 				hba->ring_tx_count[ring->ringno]--;
2220 			}
2221 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2222 
2223 			/*
2224 			 * If the fpkt is already set, then we will leave it
2225 			 * alone
2226 			 */
2227 			/*
2228 			 * This ensures that this pkt is only accounted for
2229 			 * on one fpkt->flush_count
2230 			 */
2231 			if (!sbp->fpkt && fpkt) {
2232 				mutex_enter(&fpkt->mtx);
2233 				sbp->fpkt = fpkt;
2234 				fpkt->flush_count++;
2235 				mutex_exit(&fpkt->mtx);
2236 			}
2237 			mutex_exit(&sbp->mtx);
2238 		}
2239 		iocbq = (IOCBQ *)iocbq->next;
2240 
2241 	}	/* end of while */
2242 
2243 	mutex_exit(&EMLXS_RINGTX_LOCK);
2244 
2245 	/* Now abort the iocb's outside the locks */
2246 	iocbq = (IOCBQ *)abort.q_first;
2247 	while (iocbq) {
2248 		/* Save the next iocbq for now */
2249 		next = (IOCBQ *)iocbq->next;
2250 
2251 		/* Unlink this iocbq */
2252 		iocbq->next = NULL;
2253 
2254 		/* Get the pkt */
2255 		sbp = (emlxs_buf_t *)iocbq->sbp;
2256 
2257 		if (sbp) {
2258 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2259 			    "tx: sbp=%p node=%p",
2260 			    sbp, sbp->node);
2261 
2262 			if (hba->state >= FC_LINK_UP) {
2263 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2264 				    IOERR_ABORT_REQUESTED, 1);
2265 			} else {
2266 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2267 				    IOERR_LINK_DOWN, 1);
2268 			}
2269 
2270 		}
2271 		/* Free the iocb and its associated buffers */
2272 		else {
2273 			icmd = &iocbq->iocb;
2274 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2275 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2276 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2277 				if ((hba->flag &
2278 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2279 					/* HBA is detaching or offlining */
2280 					if (icmd->ulpCommand !=
2281 					    CMD_QUE_RING_LIST64_CN) {
2282 						uint8_t *tmp;
2283 
2284 						for (i = 0;
2285 						    i < icmd->ulpBdeCount;
2286 						    i++) {
2287 							mp = EMLXS_GET_VADDR(
2288 							    hba, rp, icmd);
2289 
2290 							tmp = (uint8_t *)mp;
2291 							if (mp) {
2292 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2293 							}
2294 						}
2295 					}
2296 					(void) emlxs_mem_put(hba, MEM_IOCB,
2297 					    (uint8_t *)iocbq);
2298 				} else {
2299 					/* repost the unsolicited buffer */
2300 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2301 				}
2302 			}
2303 		}
2304 
2305 		iocbq = next;
2306 
2307 	}	/* end of while */
2308 
2309 	return (abort.q_cnt);
2310 
2311 } /* emlxs_tx_node_flush() */
2312 
2313 
2314 /* Check for IO's on all or a given ring for a given node */
2315 extern uint32_t
2316 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, RING *ring)
2317 {
2318 	emlxs_hba_t *hba = HBA;
2319 	uint32_t ringno;
2320 	RING *rp;
2321 	uint32_t count;
2322 
2323 	count = 0;
2324 
2325 	/* Flush all I/O's on tx queue to this target */
2326 	mutex_enter(&EMLXS_RINGTX_LOCK);
2327 
2328 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2329 		rp = &hba->ring[ringno];
2330 
2331 		if (ring && rp != ring) {
2332 			continue;
2333 		}
2334 		/* Check if priority queue is not empty */
2335 		if (ndlp->nlp_ptx[ringno].q_first) {
2336 			count += ndlp->nlp_ptx[ringno].q_cnt;
2337 		}
2338 		/* Check if tx queue is not empty */
2339 		if (ndlp->nlp_tx[ringno].q_first) {
2340 			count += ndlp->nlp_tx[ringno].q_cnt;
2341 		}
2342 	}
2343 
2344 	mutex_exit(&EMLXS_RINGTX_LOCK);
2345 
2346 	return (count);
2347 
2348 } /* emlxs_tx_node_check() */
2349 
2350 
2351 
2352 /* Flush all IO's on the FCP ring for a given node's lun */
2353 extern uint32_t
2354 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
2355     uint32_t lun, emlxs_buf_t *fpkt)
2356 {
2357 	emlxs_hba_t *hba = HBA;
2358 	emlxs_buf_t *sbp;
2359 	uint32_t ringno;
2360 	IOCBQ *iocbq;
2361 	IOCBQ *prev;
2362 	IOCBQ *next;
2363 	IOCB *iocb;
2364 	IOCB *icmd;
2365 	Q abort;
2366 	uint32_t i;
2367 	MATCHMAP *mp;
2368 	RING *rp;
2369 
2370 	ringno = FC_FCP_RING;
2371 	rp = &hba->ring[ringno];
2372 
2373 	bzero((void *)&abort, sizeof (Q));
2374 
2375 	/* Flush I/O's on txQ to this target's lun */
2376 	mutex_enter(&EMLXS_RINGTX_LOCK);
2377 
2378 	/* Scan the priority queue first */
2379 	prev = NULL;
2380 	iocbq = (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
2381 
2382 	while (iocbq) {
2383 		next = (IOCBQ *)iocbq->next;
2384 		iocb = &iocbq->iocb;
2385 		sbp = (emlxs_buf_t *)iocbq->sbp;
2386 
2387 		/* Check if this IO is for our lun */
2388 		if (sbp->lun == lun) {
2389 			/* Remove iocb from the node's tx queue */
2390 			if (next == 0) {
2391 				ndlp->nlp_ptx[ringno].q_last = (uint8_t *)prev;
2392 			}
2393 			if (prev == 0) {
2394 				ndlp->nlp_ptx[ringno].q_first = (uint8_t *)next;
2395 			} else {
2396 				prev->next = next;
2397 			}
2398 
2399 			iocbq->next = NULL;
2400 			ndlp->nlp_ptx[ringno].q_cnt--;
2401 
2402 			/* Add this iocb to our local abort Q */
2403 			/* This way we don't hold the RINGTX lock too long */
2404 			if (abort.q_first) {
2405 				((IOCBQ *) abort.q_last)->next = iocbq;
2406 				abort.q_last = (uint8_t *)iocbq;
2407 				abort.q_cnt++;
2408 			} else {
2409 				abort.q_first = (uint8_t *)iocbq;
2410 				abort.q_last = (uint8_t *)iocbq;
2411 				abort.q_cnt = 1;
2412 			}
2413 			iocbq->next = NULL;
2414 		} else {
2415 			prev = iocbq;
2416 		}
2417 
2418 		iocbq = next;
2419 
2420 	}	/* while (iocbq) */
2421 
2422 
2423 	/* Scan the regular queue */
2424 	prev = NULL;
2425 	iocbq = (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2426 
2427 	while (iocbq) {
2428 		next = (IOCBQ *)iocbq->next;
2429 		iocb = &iocbq->iocb;
2430 		sbp = (emlxs_buf_t *)iocbq->sbp;
2431 
2432 		/* Check if this IO is for our lun */
2433 		if (sbp->lun == lun) {
2434 			/* Remove iocb from the node's tx queue */
2435 			if (next == 0) {
2436 				ndlp->nlp_tx[ringno].q_last = (uint8_t *)prev;
2437 			}
2438 			if (prev == 0) {
2439 				ndlp->nlp_tx[ringno].q_first = (uint8_t *)next;
2440 			} else {
2441 				prev->next = next;
2442 			}
2443 
2444 			iocbq->next = NULL;
2445 			ndlp->nlp_tx[ringno].q_cnt--;
2446 
2447 			/* Add this iocb to our local abort Q */
2448 			/* This way we don't hold the RINGTX lock too long */
2449 			if (abort.q_first) {
2450 				((IOCBQ *) abort.q_last)->next = iocbq;
2451 				abort.q_last = (uint8_t *)iocbq;
2452 				abort.q_cnt++;
2453 			} else {
2454 				abort.q_first = (uint8_t *)iocbq;
2455 				abort.q_last = (uint8_t *)iocbq;
2456 				abort.q_cnt = 1;
2457 			}
2458 			iocbq->next = NULL;
2459 		} else {
2460 			prev = iocbq;
2461 		}
2462 
2463 		iocbq = next;
2464 
2465 	}	/* while (iocbq) */
2466 
2467 	/* First cleanup the iocb's while still holding the lock */
2468 	iocbq = (IOCBQ *)abort.q_first;
2469 	while (iocbq) {
2470 		/* Free the IoTag and the bmp */
2471 		iocb = &iocbq->iocb;
2472 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2473 
2474 		if (sbp && (sbp != STALE_PACKET)) {
2475 			mutex_enter(&sbp->mtx);
2476 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2477 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2478 				hba->ring_tx_count[ringno]--;
2479 			}
2480 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2481 
2482 			/*
2483 			 * If the fpkt is already set, then we will leave it
2484 			 * alone
2485 			 */
2486 			/*
2487 			 * This ensures that this pkt is only accounted for
2488 			 * on one fpkt->flush_count
2489 			 */
2490 			if (!sbp->fpkt && fpkt) {
2491 				mutex_enter(&fpkt->mtx);
2492 				sbp->fpkt = fpkt;
2493 				fpkt->flush_count++;
2494 				mutex_exit(&fpkt->mtx);
2495 			}
2496 			mutex_exit(&sbp->mtx);
2497 		}
2498 		iocbq = (IOCBQ *)iocbq->next;
2499 
2500 	}	/* end of while */
2501 
2502 	mutex_exit(&EMLXS_RINGTX_LOCK);
2503 
2504 	/* Now abort the iocb's outside the locks */
2505 	iocbq = (IOCBQ *)abort.q_first;
2506 	while (iocbq) {
2507 		/* Save the next iocbq for now */
2508 		next = (IOCBQ *)iocbq->next;
2509 
2510 		/* Unlink this iocbq */
2511 		iocbq->next = NULL;
2512 
2513 		/* Get the pkt */
2514 		sbp = (emlxs_buf_t *)iocbq->sbp;
2515 
2516 		if (sbp) {
2517 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2518 			    "tx: sbp=%p node=%p",
2519 			    sbp, sbp->node);
2520 
2521 			if (hba->state >= FC_LINK_UP) {
2522 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2523 				    IOERR_ABORT_REQUESTED, 1);
2524 			} else {
2525 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2526 				    IOERR_LINK_DOWN, 1);
2527 			}
2528 		}
2529 		/* Free the iocb and its associated buffers */
2530 		else {
2531 			icmd = &iocbq->iocb;
2532 
2533 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2534 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2535 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2536 				if ((hba->flag &
2537 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2538 					/* HBA is detaching or offlining */
2539 					if (icmd->ulpCommand !=
2540 					    CMD_QUE_RING_LIST64_CN) {
2541 						uint8_t *tmp;
2542 
2543 						for (i = 0;
2544 						    i < icmd->ulpBdeCount;
2545 						    i++) {
2546 							mp = EMLXS_GET_VADDR(
2547 							    hba, rp, icmd);
2548 
2549 							tmp = (uint8_t *)mp;
2550 							if (mp) {
2551 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2552 							}
2553 						}
2554 					}
2555 					(void) emlxs_mem_put(hba, MEM_IOCB,
2556 					    (uint8_t *)iocbq);
2557 				} else {
2558 					/* repost the unsolicited buffer */
2559 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2560 				}
2561 			}
2562 		}
2563 
2564 		iocbq = next;
2565 
2566 	}	/* end of while */
2567 
2568 
2569 	return (abort.q_cnt);
2570 
2571 } /* emlxs_tx_lun_flush() */
2572 
2573 
2574 extern void
2575 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2576 {
2577 	emlxs_hba_t *hba;
2578 	emlxs_port_t *port;
2579 	uint32_t ringno;
2580 	NODELIST *nlp;
2581 	RING *rp;
2582 	emlxs_buf_t *sbp;
2583 
2584 	port = (emlxs_port_t *)iocbq->port;
2585 	hba = HBA;
2586 	rp = (RING *)iocbq->ring;
2587 	nlp = (NODELIST *)iocbq->node;
2588 	ringno = rp->ringno;
2589 	sbp = (emlxs_buf_t *)iocbq->sbp;
2590 
2591 	if (nlp == NULL) {
2592 		/* Set node to base node by default */
2593 		nlp = &port->node_base;
2594 
2595 		iocbq->node = (void *)nlp;
2596 
2597 		if (sbp) {
2598 			sbp->node = (void *)nlp;
2599 		}
2600 	}
2601 	if (lock) {
2602 		mutex_enter(&EMLXS_RINGTX_LOCK);
2603 	}
2604 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2605 		if (sbp) {
2606 			mutex_enter(&sbp->mtx);
2607 
2608 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2609 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2610 				hba->ring_tx_count[ringno]--;
2611 			}
2612 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2613 
2614 			mutex_exit(&sbp->mtx);
2615 
2616 			/* Free the ulpIoTag and the bmp */
2617 			(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);
2618 
2619 			if (lock) {
2620 				mutex_exit(&EMLXS_RINGTX_LOCK);
2621 			}
2622 			if (hba->state >= FC_LINK_UP) {
2623 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2624 				    IOERR_ABORT_REQUESTED, 1);
2625 			} else {
2626 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2627 				    IOERR_LINK_DOWN, 1);
2628 			}
2629 
2630 			return;
2631 		} else {
2632 			if (lock) {
2633 				mutex_exit(&EMLXS_RINGTX_LOCK);
2634 			}
2635 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2636 		}
2637 
2638 		return;
2639 	}
2640 	if (sbp) {
2641 
2642 		mutex_enter(&sbp->mtx);
2643 
2644 		if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ |
2645 		    PACKET_IN_TXQ)) {
2646 			mutex_exit(&sbp->mtx);
2647 			if (lock) {
2648 				mutex_exit(&EMLXS_RINGTX_LOCK);
2649 			}
2650 			return;
2651 		}
2652 		sbp->pkt_flags |= PACKET_IN_TXQ;
2653 		hba->ring_tx_count[ringno]++;
2654 
2655 		mutex_exit(&sbp->mtx);
2656 	}
2657 	/* Check iocbq priority */
2658 	if (iocbq->flag & IOCB_PRIORITY) {
2659 		/* Add the iocb to the bottom of the node's ptx queue */
2660 		if (nlp->nlp_ptx[ringno].q_first) {
2661 			((IOCBQ *)nlp->nlp_ptx[ringno].q_last)->next = iocbq;
2662 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2663 			nlp->nlp_ptx[ringno].q_cnt++;
2664 		} else {
2665 			nlp->nlp_ptx[ringno].q_first = (uint8_t *)iocbq;
2666 			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
2667 			nlp->nlp_ptx[ringno].q_cnt = 1;
2668 		}
2669 
2670 		iocbq->next = NULL;
2671 	} else {	/* Normal priority */
2672 
2673 		/* Add the iocb to the bottom of the node's tx queue */
2674 		if (nlp->nlp_tx[ringno].q_first) {
2675 			((IOCBQ *)nlp->nlp_tx[ringno].q_last)->next = iocbq;
2676 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2677 			nlp->nlp_tx[ringno].q_cnt++;
2678 		} else {
2679 			nlp->nlp_tx[ringno].q_first = (uint8_t *)iocbq;
2680 			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
2681 			nlp->nlp_tx[ringno].q_cnt = 1;
2682 		}
2683 
2684 		iocbq->next = NULL;
2685 	}
2686 
2687 
2688 	/*
2689 	 * Check if the node is not already on ring queue and (is not closed
2690 	 * or  is a priority request)
2691 	 */
2692 	if (!nlp->nlp_next[ringno] && (!(nlp->nlp_flag[ringno] & NLP_CLOSED) ||
2693 	    (iocbq->flag & IOCB_PRIORITY))) {
2694 		/* If so, then add it to the ring queue */
2695 		if (rp->nodeq.q_first) {
2696 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
2697 			    (uint8_t *)nlp;
2698 			nlp->nlp_next[ringno] = rp->nodeq.q_first;
2699 
2700 			/*
2701 			 * If this is not the base node then add it to the
2702 			 * tail
2703 			 */
2704 			if (!nlp->nlp_base) {
2705 				rp->nodeq.q_last = (uint8_t *)nlp;
2706 			} else {	/* Otherwise, add it to the head */
2707 				/* The command node always gets priority */
2708 				rp->nodeq.q_first = (uint8_t *)nlp;
2709 			}
2710 
2711 			rp->nodeq.q_cnt++;
2712 		} else {
2713 			rp->nodeq.q_first = (uint8_t *)nlp;
2714 			rp->nodeq.q_last = (uint8_t *)nlp;
2715 			nlp->nlp_next[ringno] = nlp;
2716 			rp->nodeq.q_cnt = 1;
2717 		}
2718 	}
2719 	HBASTATS.IocbTxPut[ringno]++;
2720 
2721 	/* Adjust the ring timeout timer */
2722 	rp->timeout = hba->timer_tics + 5;
2723 
2724 	if (lock) {
2725 		mutex_exit(&EMLXS_RINGTX_LOCK);
2726 	}
2727 	return;
2728 
2729 } /* emlxs_tx_put() */
2730 
2731 
2732 extern IOCBQ *
2733 emlxs_tx_get(RING *rp, uint32_t lock)
2734 {
2735 	emlxs_hba_t *hba;
2736 	uint32_t ringno;
2737 	IOCBQ *iocbq;
2738 	NODELIST *nlp;
2739 	emlxs_buf_t *sbp;
2740 
2741 	hba = rp->hba;
2742 	ringno = rp->ringno;
2743 
2744 	if (lock) {
2745 		mutex_enter(&EMLXS_RINGTX_LOCK);
2746 	}
2747 begin:
2748 
2749 	iocbq = NULL;
2750 
2751 	/* Check if a node needs servicing */
2752 	if (rp->nodeq.q_first) {
2753 		nlp = (NODELIST *)rp->nodeq.q_first;
2754 
2755 		/* Get next iocb from node's priority queue */
2756 
2757 		if (nlp->nlp_ptx[ringno].q_first) {
2758 			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;
2759 
2760 			/* Check if this is last entry */
2761 			if (nlp->nlp_ptx[ringno].q_last == (void *)iocbq) {
2762 				nlp->nlp_ptx[ringno].q_first = NULL;
2763 				nlp->nlp_ptx[ringno].q_last = NULL;
2764 				nlp->nlp_ptx[ringno].q_cnt = 0;
2765 			} else {
2766 				/* Remove iocb from head */
2767 				nlp->nlp_ptx[ringno].q_first =
2768 				    (void *)iocbq->next;
2769 				nlp->nlp_ptx[ringno].q_cnt--;
2770 			}
2771 
2772 			iocbq->next = NULL;
2773 		}
2774 		/* Get next iocb from node tx queue if node not closed */
2775 		else if (nlp->nlp_tx[ringno].q_first &&
2776 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED)) {
2777 			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;
2778 
2779 			/* Check if this is last entry */
2780 			if (nlp->nlp_tx[ringno].q_last == (void *)iocbq) {
2781 				nlp->nlp_tx[ringno].q_first = NULL;
2782 				nlp->nlp_tx[ringno].q_last = NULL;
2783 				nlp->nlp_tx[ringno].q_cnt = 0;
2784 			} else {
2785 				/* Remove iocb from head */
2786 				nlp->nlp_tx[ringno].q_first =
2787 				    (void *)iocbq->next;
2788 				nlp->nlp_tx[ringno].q_cnt--;
2789 			}
2790 
2791 			iocbq->next = NULL;
2792 		}
2793 		/* Now deal with node itself */
2794 
2795 		/* Check if node still needs servicing */
2796 		if ((nlp->nlp_ptx[ringno].q_first) ||
2797 		    (nlp->nlp_tx[ringno].q_first &&
2798 		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {
2799 
2800 			/*
2801 			 * If this is the base node, then don't shift the
2802 			 * pointers
2803 			 */
2804 			/* We want to drain the base node before moving on */
2805 			if (!nlp->nlp_base) {
2806 				/*
2807 				 * Just shift ring queue pointers to next
2808 				 * node
2809 				 */
2810 				rp->nodeq.q_last = (void *)nlp;
2811 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2812 			}
2813 		} else {
2814 			/* Remove node from ring queue */
2815 
2816 			/* If this is the last node on list */
2817 			if (rp->nodeq.q_last == (void *)nlp) {
2818 				rp->nodeq.q_last = NULL;
2819 				rp->nodeq.q_first = NULL;
2820 				rp->nodeq.q_cnt = 0;
2821 			} else {
2822 				NODELIST *nd;
2823 
2824 				/* Remove node from head */
2825 				rp->nodeq.q_first = nlp->nlp_next[ringno];
2826 				nd = (NODELIST *)rp->nodeq.q_last;
2827 				nd->nlp_next[ringno] = rp->nodeq.q_first;
2828 				rp->nodeq.q_cnt--;
2829 
2830 			}
2831 
2832 			/* Clear node */
2833 			nlp->nlp_next[ringno] = NULL;
2834 		}
2835 
2836 		/*
2837 		 * If no iocbq was found on this node, then it will have been
2838 		 * removed. So try again.
2839 		 */
2840 		if (!iocbq) {
2841 			goto begin;
2842 		}
2843 		sbp = (emlxs_buf_t *)iocbq->sbp;
2844 
2845 		if (sbp) {
2846 			/*
2847 			 * Check flags before we enter mutex in case this has
2848 			 * been flushed and destroyed
2849 			 */
2850 			if ((sbp->pkt_flags &
2851 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2852 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2853 				goto begin;
2854 			}
2855 			mutex_enter(&sbp->mtx);
2856 
2857 			if ((sbp->pkt_flags &
2858 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
2859 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
2860 				mutex_exit(&sbp->mtx);
2861 				goto begin;
2862 			}
2863 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
2864 			hba->ring_tx_count[ringno]--;
2865 
2866 			mutex_exit(&sbp->mtx);
2867 		}
2868 	}
2869 	if (iocbq) {
2870 		HBASTATS.IocbTxGet[ringno]++;
2871 	}
2872 	/* Adjust the ring timeout timer */
2873 	rp->timeout = (rp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
2874 
2875 	if (lock) {
2876 		mutex_exit(&EMLXS_RINGTX_LOCK);
2877 	}
2878 	return (iocbq);
2879 
2880 } /* emlxs_tx_get() */
2881 
2882 
2883 
2884 extern uint32_t
2885 emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring,
2886     NODELIST *ndlp, emlxs_buf_t *fpkt)
2887 {
2888 	emlxs_hba_t *hba = HBA;
2889 	emlxs_buf_t *sbp;
2890 	IOCBQ *iocbq;
2891 	IOCBQ *next;
2892 	Q abort;
2893 	RING *rp;
2894 	uint32_t ringno;
2895 	uint8_t flag[MAX_RINGS];
2896 	uint32_t iotag;
2897 
2898 	bzero((void *)&abort, sizeof (Q));
2899 	bzero((void *)flag, sizeof (flag));
2900 
2901 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2902 		rp = &hba->ring[ringno];
2903 
2904 		if (ring && rp != ring) {
2905 			continue;
2906 		}
2907 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
2908 
2909 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
2910 			sbp = rp->fc_table[iotag];
2911 
2912 			if (sbp && (sbp != STALE_PACKET) &&
2913 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
2914 			    (sbp->node == ndlp) &&
2915 			    (sbp->ring == rp) &&
2916 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
2917 				emlxs_sbp_abort_add(port, sbp, &abort,
2918 				    flag, fpkt);
2919 			}
2920 		}
2921 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
2922 
2923 	}	/* for */
2924 
2925 	/* Now put the iocb's on the tx queue */
2926 	iocbq = (IOCBQ *)abort.q_first;
2927 	while (iocbq) {
2928 		/* Save the next iocbq for now */
2929 		next = (IOCBQ *)iocbq->next;
2930 
2931 		/* Unlink this iocbq */
2932 		iocbq->next = NULL;
2933 
2934 		/* Send this iocbq */
2935 		emlxs_tx_put(iocbq, 1);
2936 
2937 		iocbq = next;
2938 	}
2939 
2940 	/* Now trigger ring service */
2941 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2942 		if (!flag[ringno]) {
2943 			continue;
2944 		}
2945 		rp = &hba->ring[ringno];
2946 
2947 		emlxs_issue_iocb_cmd(hba, rp, 0);
2948 	}
2949 
2950 	return (abort.q_cnt);
2951 
2952 } /* emlxs_chipq_node_flush() */
2953 
2954 
2955 /* Flush all IO's left on all iotag lists */
2956 static uint32_t
2957 emlxs_iotag_flush(emlxs_hba_t *hba)
2958 {
2959 	emlxs_port_t *port = &PPORT;
2960 	emlxs_buf_t *sbp;
2961 	IOCBQ *iocbq;
2962 	IOCB *iocb;
2963 	Q abort;
2964 	RING *rp;
2965 	uint32_t ringno;
2966 	uint32_t iotag;
2967 	uint32_t count;
2968 
2969 	count = 0;
2970 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2971 		rp = &hba->ring[ringno];
2972 
2973 		bzero((void *)&abort, sizeof (Q));
2974 
2975 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
2976 
2977 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
2978 			sbp = rp->fc_table[iotag];
2979 
2980 			if (!sbp || (sbp == STALE_PACKET)) {
2981 				continue;
2982 			}
2983 			/* Unregister the packet */
2984 			rp->fc_table[iotag] = STALE_PACKET;
2985 			hba->io_count[ringno]--;
2986 			sbp->iotag = 0;
2987 
2988 			/* Clean up the sbp */
2989 			mutex_enter(&sbp->mtx);
2990 
2991 			/* Set IOCB status */
2992 			iocbq = &sbp->iocbq;
2993 			iocb = &iocbq->iocb;
2994 
2995 			iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
2996 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
2997 			iocb->ulpLe = 1;
2998 			iocbq->next = NULL;
2999 
3000 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
3001 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
3002 				hba->ring_tx_count[ringno]--;
3003 			}
3004 			if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3005 				sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3006 			}
3007 			if (sbp->bmp) {
3008 				(void) emlxs_mem_put(hba, MEM_BPL,
3009 				    (uint8_t *)sbp->bmp);
3010 				sbp->bmp = 0;
3011 			}
3012 			/* At this point all nodes are assumed destroyed */
3013 			sbp->node = 0;
3014 
3015 			mutex_exit(&sbp->mtx);
3016 
3017 			/* Add this iocb to our local abort Q */
3018 			if (abort.q_first) {
3019 				((IOCBQ *) abort.q_last)->next = iocbq;
3020 				abort.q_last = (uint8_t *)iocbq;
3021 				abort.q_cnt++;
3022 			} else {
3023 				abort.q_first = (uint8_t *)iocbq;
3024 				abort.q_last = (uint8_t *)iocbq;
3025 				abort.q_cnt = 1;
3026 			}
3027 		}
3028 
3029 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3030 
3031 		/* Trigger deferred completion */
3032 		if (abort.q_first) {
3033 			mutex_enter(&rp->rsp_lock);
3034 			if (rp->rsp_head == NULL) {
3035 				rp->rsp_head = (IOCBQ *)abort.q_first;
3036 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3037 			} else {
3038 				rp->rsp_tail->next = (IOCBQ *)abort.q_first;
3039 				rp->rsp_tail = (IOCBQ *)abort.q_last;
3040 			}
3041 			mutex_exit(&rp->rsp_lock);
3042 
3043 			emlxs_thread_trigger2(&rp->intr_thread,
3044 			    emlxs_proc_ring, rp);
3045 
3046 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3047 			    "Forced iotag completion. ring=%d count=%d",
3048 			    ringno, abort.q_cnt);
3049 
3050 			count += abort.q_cnt;
3051 		}
3052 	}
3053 
3054 	return (count);
3055 
3056 } /* emlxs_iotag_flush() */
3057 
3058 
3059 
3060 /* Checks for IO's on all or a given ring for a given node */
3061 extern uint32_t
3062 emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp)
3063 {
3064 	emlxs_hba_t *hba = HBA;
3065 	emlxs_buf_t *sbp;
3066 	RING *rp;
3067 	uint32_t ringno;
3068 	uint32_t count;
3069 	uint32_t iotag;
3070 
3071 	count = 0;
3072 
3073 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3074 		rp = &hba->ring[ringno];
3075 
3076 		if (ring && rp != ring) {
3077 			continue;
3078 		}
3079 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3080 
3081 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3082 			sbp = rp->fc_table[iotag];
3083 
3084 			if (sbp && (sbp != STALE_PACKET) &&
3085 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3086 			    (sbp->node == ndlp) &&
3087 			    (sbp->ring == rp) &&
3088 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3089 				count++;
3090 			}
3091 		}
3092 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3093 
3094 	}	/* for */
3095 
3096 	return (count);
3097 
3098 } /* emlxs_chipq_node_check() */
3099 
3100 
3101 
3102 /* Flush all IO's for a given node's lun (FC_FCP_RING only) */
3103 extern uint32_t
3104 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3105     uint32_t lun, emlxs_buf_t *fpkt)
3106 {
3107 	emlxs_hba_t *hba = HBA;
3108 	emlxs_buf_t *sbp;
3109 	RING *rp;
3110 	IOCBQ *iocbq;
3111 	IOCBQ *next;
3112 	Q abort;
3113 	uint32_t iotag;
3114 	uint8_t flag[MAX_RINGS];
3115 
3116 	bzero((void *)flag, sizeof (flag));
3117 	bzero((void *)&abort, sizeof (Q));
3118 	rp = &hba->ring[FC_FCP_RING];
3119 
3120 	mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3121 	for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3122 		sbp = rp->fc_table[iotag];
3123 
3124 		if (sbp && (sbp != STALE_PACKET) &&
3125 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3126 		    sbp->node == ndlp &&
3127 		    sbp->ring == rp &&
3128 		    sbp->lun == lun &&
3129 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3130 			emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
3131 		}
3132 	}
3133 	mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3134 
3135 	/* Now put the iocb's on the tx queue */
3136 	iocbq = (IOCBQ *)abort.q_first;
3137 	while (iocbq) {
3138 		/* Save the next iocbq for now */
3139 		next = (IOCBQ *)iocbq->next;
3140 
3141 		/* Unlink this iocbq */
3142 		iocbq->next = NULL;
3143 
3144 		/* Send this iocbq */
3145 		emlxs_tx_put(iocbq, 1);
3146 
3147 		iocbq = next;
3148 	}
3149 
3150 	/* Now trigger ring service */
3151 	if (abort.q_cnt) {
3152 		emlxs_issue_iocb_cmd(hba, rp, 0);
3153 	}
3154 	return (abort.q_cnt);
3155 
3156 } /* emlxs_chipq_lun_flush() */
3157 
3158 
3159 
3160 /*
3161  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3162  * This must be called while holding the EMLXS_FCCTAB_LOCK
3163  */
3164 extern IOCBQ *
3165 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp, uint16_t iotag,
3166     RING *rp, uint8_t class, int32_t flag)
3167 {
3168 	emlxs_hba_t *hba = HBA;
3169 	IOCBQ *iocbq;
3170 	IOCB *iocb;
3171 	uint16_t abort_iotag;
3172 
3173 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3174 		return (NULL);
3175 	}
3176 	iocbq->ring = (void *)rp;
3177 	iocbq->port = (void *)port;
3178 	iocbq->node = (void *)ndlp;
3179 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3180 	iocb = &iocbq->iocb;
3181 
3182 	/*
3183 	 * set up an iotag using special Abort iotags
3184 	 */
3185 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3186 		rp->fc_abort_iotag = rp->max_iotag;
3187 	}
3188 	abort_iotag = rp->fc_abort_iotag++;
3189 
3190 
3191 	iocb->ulpIoTag = abort_iotag;
3192 	iocb->un.acxri.abortType = flag;
3193 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3194 	iocb->un.acxri.abortIoTag = iotag;
3195 	iocb->ulpLe = 1;
3196 	iocb->ulpClass = class;
3197 	iocb->ulpCommand = CMD_ABORT_XRI_CN;
3198 	iocb->ulpOwner = OWN_CHIP;
3199 
3200 	return (iocbq);
3201 
3202 } /* emlxs_create_abort_xri_cn() */
3203 
3204 
3205 extern IOCBQ *
3206 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3207     RING *rp, uint8_t class, int32_t flag)
3208 {
3209 	emlxs_hba_t *hba = HBA;
3210 	IOCBQ *iocbq;
3211 	IOCB *iocb;
3212 	uint16_t abort_iotag;
3213 
3214 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3215 		return (NULL);
3216 	}
3217 	iocbq->ring = (void *)rp;
3218 	iocbq->port = (void *)port;
3219 	iocbq->node = (void *)ndlp;
3220 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3221 	iocb = &iocbq->iocb;
3222 
3223 	/*
3224 	 * set up an iotag using special Abort iotags
3225 	 */
3226 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3227 		rp->fc_abort_iotag = rp->max_iotag;
3228 	}
3229 	abort_iotag = rp->fc_abort_iotag++;
3230 
3231 	iocb->ulpContext = xid;
3232 	iocb->ulpIoTag = abort_iotag;
3233 	iocb->un.acxri.abortType = flag;
3234 	iocb->ulpLe = 1;
3235 	iocb->ulpClass = class;
3236 	iocb->ulpCommand = CMD_ABORT_XRI_CX;
3237 	iocb->ulpOwner = OWN_CHIP;
3238 
3239 	return (iocbq);
3240 
3241 } /* emlxs_create_abort_xri_cx() */
3242 
3243 
3244 
3245 /* This must be called while holding the EMLXS_FCCTAB_LOCK */
3246 extern IOCBQ *
3247 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3248     uint16_t iotag, RING *rp)
3249 {
3250 	emlxs_hba_t *hba = HBA;
3251 	IOCBQ *iocbq;
3252 	IOCB *iocb;
3253 	uint16_t abort_iotag;
3254 
3255 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3256 		return (NULL);
3257 	}
3258 	iocbq->ring = (void *)rp;
3259 	iocbq->port = (void *)port;
3260 	iocbq->node = (void *)ndlp;
3261 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3262 	iocb = &iocbq->iocb;
3263 
3264 	/*
3265 	 * set up an iotag using special Abort iotags
3266 	 */
3267 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3268 		rp->fc_abort_iotag = rp->max_iotag;
3269 	}
3270 	abort_iotag = rp->fc_abort_iotag++;
3271 
3272 	iocb->ulpIoTag = abort_iotag;
3273 	iocb->un.acxri.abortType = 0;
3274 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3275 	iocb->un.acxri.abortIoTag = iotag;
3276 	iocb->ulpLe = 1;
3277 	iocb->ulpClass = 0;
3278 	iocb->ulpCommand = CMD_CLOSE_XRI_CN;
3279 	iocb->ulpOwner = OWN_CHIP;
3280 
3281 	return (iocbq);
3282 
3283 } /* emlxs_create_close_xri_cn() */
3284 
3285 
3286 /* This must be called while holding the EMLXS_FCCTAB_LOCK */
3287 extern IOCBQ *
3288 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp,
3289     uint16_t xid, RING *rp)
3290 {
3291 	emlxs_hba_t *hba = HBA;
3292 	IOCBQ *iocbq;
3293 	IOCB *iocb;
3294 	uint16_t abort_iotag;
3295 
3296 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3297 		return (NULL);
3298 	}
3299 	iocbq->ring = (void *)rp;
3300 	iocbq->port = (void *)port;
3301 	iocbq->node = (void *)ndlp;
3302 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3303 	iocb = &iocbq->iocb;
3304 
3305 	/*
3306 	 * set up an iotag using special Abort iotags
3307 	 */
3308 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3309 		rp->fc_abort_iotag = rp->max_iotag;
3310 	}
3311 	abort_iotag = rp->fc_abort_iotag++;
3312 
3313 	iocb->ulpContext = xid;
3314 	iocb->ulpIoTag = abort_iotag;
3315 	iocb->ulpLe = 1;
3316 	iocb->ulpClass = 0;
3317 	iocb->ulpCommand = CMD_CLOSE_XRI_CX;
3318 	iocb->ulpOwner = OWN_CHIP;
3319 
3320 	return (iocbq);
3321 
3322 } /* emlxs_create_close_xri_cx() */
3323 
3324 
3325 void
3326 emlxs_abort_ct_exchange(emlxs_port_t *port, uint32_t rxid)
3327 {
3328 	emlxs_hba_t *hba = HBA;
3329 	RING *rp;
3330 	IOCBQ *iocbq;
3331 
3332 	rp = &hba->ring[FC_CT_RING];
3333 
3334 	/* Create the abort IOCB */
3335 	if (hba->state >= FC_LINK_UP) {
3336 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, rp,
3337 		    CLASS3, ABORT_TYPE_ABTS);
3338 	}
3339 	else
3340 	{
3341 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, rp);
3342 	}
3343 	iocbq->port = port;
3344 	emlxs_issue_iocb_cmd(hba, rp, iocbq);
3345 }
3346 
3347 
3348 /* This must be called while holding the EMLXS_FCCTAB_LOCK */
3349 static void
3350 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
3351     uint8_t *flag, emlxs_buf_t *fpkt)
3352 {
3353 	emlxs_hba_t *hba = HBA;
3354 	IOCBQ *iocbq;
3355 	RING *rp;
3356 	NODELIST *ndlp;
3357 
3358 	rp = (RING *)sbp->ring;
3359 	ndlp = sbp->node;
3360 
3361 	/* Create the close XRI IOCB */
3362 	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp);
3363 
3364 	/* Add this iocb to our local abort Q */
3365 	/* This way we don't hold the CHIPQ lock too long */
3366 	if (iocbq) {
3367 		if (abort->q_first) {
3368 			((IOCBQ *) abort->q_last)->next = iocbq;
3369 			abort->q_last = (uint8_t *)iocbq;
3370 			abort->q_cnt++;
3371 		} else {
3372 			abort->q_first = (uint8_t *)iocbq;
3373 			abort->q_last = (uint8_t *)iocbq;
3374 			abort->q_cnt = 1;
3375 		}
3376 		iocbq->next = NULL;
3377 	}
3378 	/* set the flags */
3379 	mutex_enter(&sbp->mtx);
3380 
3381 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
3382 	sbp->ticks = hba->timer_tics + 10;
3383 	sbp->abort_attempts++;
3384 
3385 	flag[rp->ringno] = 1;
3386 
3387 	/* If the fpkt is already set, then we will leave it alone */
3388 	/*
3389 	 * This ensures that this pkt is only accounted for on one
3390 	 * fpkt->flush_count
3391 	 */
3392 	if (!sbp->fpkt && fpkt) {
3393 		mutex_enter(&fpkt->mtx);
3394 		sbp->fpkt = fpkt;
3395 		fpkt->flush_count++;
3396 		mutex_exit(&fpkt->mtx);
3397 	}
3398 	mutex_exit(&sbp->mtx);
3399 
3400 	return;
3401 
3402 } /* emlxs_sbp_abort_add() */
3403