1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Emulex.  All rights reserved.
24  * Use is subject to License terms.
25  */
26 
27 
28 #include "emlxs.h"
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	getPaddr(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
35 
36 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
37     uint8_t *flag, emlxs_buf_t *fpkt);
38 static uint32_t emlxs_iotag_flush(emlxs_hba_t *hba);
39 
40 /*
41  * This routine copies data from src then potentially swaps the destination to
42  * big endian. Assumes cnt is a multiple of * sizeof(uint32_t).
43  */
44 extern void
45 emlxs_pcimem_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
46 {
47 	uint32_t ldata;
48 	int32_t i;
49 
50 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
51 		ldata = *src++;
52 		ldata = PCIMEM_LONG(ldata);
53 		*dest++ = ldata;
54 	}
55 } /* emlxs_pcimem_bcopy */
56 
57 
58 /*
59  * This routine copies data from src then swaps the destination to big endian.
60  * Assumes cnt is a multiple of sizeof(uint32_t).
61  */
62 extern void
63 emlxs_swap_bcopy(uint32_t *src, uint32_t *dest, uint32_t cnt)
64 {
65 	uint32_t ldata;
66 	int32_t i;
67 
68 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
69 		ldata = *src++;
70 		ldata = SWAP_DATA32(ldata);
71 		*dest++ = ldata;
72 	}
73 } /* End fc_swap_bcopy */
74 
75 
76 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
77 #define	SCSI_INQUIRY	0x12
78 #define	SCSI_RX_DIAG    0x1C
79 
80 
81 /*
82  *  emlxs_handle_fcp_event
83  *
84  *  Description: Process an FCP Rsp Ring completion
85  *
86  */
87 /* ARGSUSED */
88 extern void
89 emlxs_handle_fcp_event(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
90 {
91 	emlxs_port_t *port = &PPORT;
92 	IOCB *cmd;
93 	emlxs_buf_t *sbp;
94 	fc_packet_t *pkt = NULL;
95 	uint32_t iostat;
96 	uint8_t localstat;
97 	fcp_rsp_t *rsp;
98 	uint32_t rsp_data_resid;
99 	uint32_t check_underrun;
100 	uint8_t asc;
101 	uint8_t ascq;
102 	uint8_t scsi_status;
103 	uint8_t sense;
104 	uint32_t did;
105 	uint32_t fix_it;
106 	uint8_t *scsi_cmd;
107 	uint8_t scsi_opcode;
108 	uint16_t scsi_dl;
109 	uint32_t data_rx;
110 
111 	cmd = &iocbq->iocb;
112 
113 	/* Initialize the status */
114 	iostat = cmd->ulpStatus;
115 	localstat = 0;
116 	scsi_status = 0;
117 	asc = 0;
118 	ascq = 0;
119 	sense = 0;
120 	check_underrun = 0;
121 	fix_it = 0;
122 
123 	HBASTATS.FcpEvent++;
124 
125 	sbp = (emlxs_buf_t *)iocbq->sbp;
126 
127 	if (!sbp) {
128 		/* completion with missing xmit command */
129 		HBASTATS.FcpStray++;
130 
131 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
132 		    "cmd=%x iotag=%x",
133 		    cmd->ulpCommand, cmd->ulpIoTag);
134 
135 		return;
136 	}
137 	HBASTATS.FcpCompleted++;
138 
139 	pkt = PRIV2PKT(sbp);
140 
141 	did = SWAP_DATA24_LO(pkt->pkt_cmd_fhdr.d_id);
142 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
143 	scsi_opcode = scsi_cmd[12];
144 	data_rx = 0;
145 
146 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
147 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
148 		emlxs_mpdata_sync(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
149 		    DDI_DMA_SYNC_FORKERNEL);
150 
151 #ifdef TEST_SUPPORT
152 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
153 		    (pkt->pkt_datalen >= 512)) {
154 			hba->underrun_counter--;
155 			iostat = IOSTAT_FCP_RSP_ERROR;
156 
157 			/* Report 512 bytes missing by adapter */
158 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
159 
160 			/* Corrupt 512 bytes of Data buffer */
161 			bzero((uint8_t *)pkt->pkt_data, 512);
162 
163 			/* Set FCP response to STATUS_GOOD */
164 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
165 		}
166 #endif	/* TEST_SUPPORT */
167 	}
168 	/* Process the pkt */
169 	mutex_enter(&sbp->mtx);
170 
171 	/* Check for immediate return */
172 	if ((iostat == IOSTAT_SUCCESS) &&
173 	    (pkt->pkt_comp) &&
174 	    !(sbp->pkt_flags & (PACKET_RETURNED | PACKET_COMPLETED |
175 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
176 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
177 	    PACKET_IN_ABORT | PACKET_POLLED))) {
178 		HBASTATS.FcpGood++;
179 
180 		sbp->pkt_flags |= (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
181 		    PACKET_COMPLETED | PACKET_RETURNED);
182 		mutex_exit(&sbp->mtx);
183 
184 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
185 		emlxs_unswap_pkt(sbp);
186 #endif	/* EMLXS_MODREV2X */
187 
188 		(*pkt->pkt_comp) (pkt);
189 
190 		return;
191 	}
192 	/*
193 	 * A response is only placed in the resp buffer if
194 	 * IOSTAT_FCP_RSP_ERROR is reported
195 	 */
196 
197 	/* Check if a response buffer was provided */
198 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
199 		emlxs_mpdata_sync(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
200 		    DDI_DMA_SYNC_FORKERNEL);
201 
202 		/* Get the response buffer pointer */
203 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
204 
205 		/* Set the valid response flag */
206 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
207 
208 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
209 
210 		/*
211 		 * Convert a task abort to a check condition with no data
212 		 * transferred
213 		 */
214 		/*
215 		 * We saw a data corruption when Solaris received a Task
216 		 * Abort from a tape
217 		 */
218 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
219 			EMLXS_MSGF(EMLXS_CONTEXT,
220 			    &emlxs_fcp_completion_error_msg,
221 			    "Task Abort. Fixed. "
222 			    "did=0x%06x sbp=%p cmd=%02x dl=%d",
223 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
224 
225 			rsp->fcp_u.fcp_status.scsi_status =
226 			    SCSI_STAT_CHECK_COND;
227 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
228 			rsp->fcp_u.fcp_status.sense_len_set = 0;
229 			rsp->fcp_u.fcp_status.resid_over = 0;
230 
231 			if (pkt->pkt_datalen) {
232 				rsp->fcp_u.fcp_status.resid_under = 1;
233 				rsp->fcp_resid = SWAP_DATA32(pkt->pkt_datalen);
234 			} else {
235 				rsp->fcp_u.fcp_status.resid_under = 0;
236 				rsp->fcp_resid = 0;
237 			}
238 
239 			scsi_status = SCSI_STAT_CHECK_COND;
240 		}
241 		/*
242 		 * We only need to check underrun if data could have been
243 		 * sent
244 		 */
245 
246 		/* Always check underrun if status is good */
247 		if (scsi_status == SCSI_STAT_GOOD) {
248 			check_underrun = 1;
249 		}
250 		/* Check the sense codes if this is a check condition */
251 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
252 			check_underrun = 1;
253 
254 			/* Check if sense data was provided */
255 			if (SWAP_DATA32(rsp->fcp_sense_len) >= 14) {
256 				sense = *((uint8_t *)rsp + 32 + 2);
257 				asc = *((uint8_t *)rsp + 32 + 12);
258 				ascq = *((uint8_t *)rsp + 32 + 13);
259 			}
260 		}
261 		/* Status is not good and this is not a check condition */
262 		/* No data should have been sent */
263 		else {
264 			check_underrun = 0;
265 		}
266 
267 		/* Get the residual underrun count reported by the SCSI reply */
268 		rsp_data_resid = (pkt->pkt_datalen &&
269 		    rsp->fcp_u.fcp_status.resid_under)
270 		    ? SWAP_DATA32(rsp->fcp_resid) : 0;
271 
272 		/* Set the pkt resp_resid field */
273 		pkt->pkt_resp_resid = 0;
274 
275 		/* Set the pkt data_resid field */
276 		if (pkt->pkt_datalen &&
277 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
278 			/*
279 			 * Get the residual underrun count reported by our
280 			 * adapter
281 			 */
282 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
283 
284 			/* Get the actual amount of data transferred */
285 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
286 
287 			/*
288 			 * If the residual being reported by the adapter is
289 			 * greater than the residual being reported in the
290 			 * reply, then we have a true underrun.
291 			 */
292 			if (check_underrun &&
293 			    (pkt->pkt_data_resid > rsp_data_resid)) {
294 				switch (scsi_opcode) {
295 				case SCSI_INQUIRY:
296 					scsi_dl = scsi_cmd[16];
297 					break;
298 
299 				case SCSI_RX_DIAG:
300 					scsi_dl = (scsi_cmd[15] * 0x100) +
301 					    scsi_cmd[16];
302 					break;
303 
304 				default:
305 					scsi_dl = pkt->pkt_datalen;
306 				}
307 
308 #ifdef FCP_UNDERRUN_PATCH1
309 				/*
310 				 * If status is not good and no data was
311 				 * actually transferred, then we must fix the
312 				 * issue
313 				 */
314 				if ((scsi_status != SCSI_STAT_GOOD) &&
315 				    (data_rx == 0)) {
316 					fix_it = 1;
317 
318 					EMLXS_MSGF(EMLXS_CONTEXT,
319 					    &emlxs_fcp_completion_error_msg,
320 					    "Underrun(1). Fixed. did=0x%06x "
321 					    "sbp=%p cmd=%02x dl=%d,%d rx=%d "
322 					    "rsp=%d",
323 					    did, sbp, scsi_opcode,
324 					    pkt->pkt_datalen, scsi_dl,
325 					    (pkt->pkt_datalen -
326 					    cmd->un.fcpi.fcpi_parm),
327 					    rsp_data_resid);
328 
329 				}
330 #endif	/* FCP_UNDERRUN_PATCH1 */
331 
332 
333 #ifdef FCP_UNDERRUN_PATCH2
334 				if ((scsi_status == SCSI_STAT_GOOD)) {
335 					emlxs_msg_t *msg;
336 
337 					msg = &emlxs_fcp_completion_error_msg;
338 					/*
339 					 * If status is good and this is an
340 					 * inquiry request and the amount of
341 					 * data
342 					 */
343 					/*
344 					 * requested <= data received, then
345 					 * we must fix the issue.
346 					 */
347 
348 					if ((scsi_opcode == SCSI_INQUIRY) &&
349 					    (pkt->pkt_datalen >= data_rx) &&
350 					    (scsi_dl <= data_rx)) {
351 						fix_it = 1;
352 
353 						EMLXS_MSGF(EMLXS_CONTEXT,
354 						    msg,
355 						    "Underrun(2). Fixed. "
356 						    "did=0x%06x sbp=%p "
357 						    "cmd=%02x dl=%d,%d "
358 						    "rx=%d rsp=%d",
359 						    did, sbp, scsi_opcode,
360 						    pkt->pkt_datalen, scsi_dl,
361 						    data_rx, rsp_data_resid);
362 
363 					}
364 					/*
365 					 * If status is good and this is an
366 					 * inquiry request and the amount of
367 					 * data
368 					 */
369 					/*
370 					 * requested >= 128 bytes, but only
371 					 * 128 bytes were received,
372 					 */
373 					/* then we must fix the issue. */
374 					else if ((scsi_opcode == SCSI_INQUIRY)&&
375 					    (pkt->pkt_datalen >= 128) &&
376 					    (scsi_dl >= 128) &&
377 					    (data_rx == 128)) {
378 						fix_it = 1;
379 
380 						EMLXS_MSGF(EMLXS_CONTEXT,
381 						    msg,
382 						    "Underrun(3). Fixed. "
383 						    "did=0x%06x sbp=%p "
384 						    "cmd=%02x dl=%d,%d rx=%d "
385 						    "rsp=%d",
386 						    did, sbp, scsi_opcode,
387 						    pkt->pkt_datalen, scsi_dl,
388 						    data_rx, rsp_data_resid);
389 
390 					}
391 				}
392 #endif	/* FCP_UNDERRUN_PATCH2 */
393 
394 				/*
395 				 * Check if SCSI response payload should be
396 				 * fixed or
397 				 */
398 				/* if a DATA_UNDERRUN should be reported */
399 				if (fix_it) {
400 					/*
401 					 * Fix the SCSI response payload
402 					 * itself
403 					 */
404 					rsp->fcp_u.fcp_status.resid_under = 1;
405 					rsp->fcp_resid =
406 					    SWAP_DATA32(pkt->pkt_data_resid);
407 				} else {
408 					/*
409 					 * Change the status from
410 					 * IOSTAT_FCP_RSP_ERROR to
411 					 * IOSTAT_DATA_UNDERRUN
412 					 */
413 					iostat = IOSTAT_DATA_UNDERRUN;
414 					pkt->pkt_data_resid = pkt->pkt_datalen;
415 				}
416 			}
417 			/*
418 			 * If the residual being reported by the adapter is
419 			 * less than the residual being reported in the
420 			 * reply, then we have a true overrun. Since we don't
421 			 * know where the extra data came from or went to
422 			 * then we cannot trust anything we received
423 			 */
424 			else if (rsp_data_resid > pkt->pkt_data_resid) {
425 				/*
426 				 * Change the status from
427 				 * IOSTAT_FCP_RSP_ERROR to
428 				 * IOSTAT_DATA_OVERRUN
429 				 */
430 				iostat = IOSTAT_DATA_OVERRUN;
431 				pkt->pkt_data_resid = pkt->pkt_datalen;
432 			}
433 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
434 			/* Report whatever the target reported */
435 			pkt->pkt_data_resid = rsp_data_resid;
436 		}
437 	}
438 	/*
439 	 * If pkt is tagged for timeout then set the return codes
440 	 * appropriately
441 	 */
442 	if (sbp->pkt_flags & PACKET_IN_TIMEOUT) {
443 		iostat = IOSTAT_LOCAL_REJECT;
444 		localstat = IOERR_ABORT_TIMEOUT;
445 		goto done;
446 	}
447 	/* If pkt is tagged for abort then set the return codes appropriately */
448 	if (sbp->pkt_flags & (PACKET_IN_FLUSH | PACKET_IN_ABORT)) {
449 		iostat = IOSTAT_LOCAL_REJECT;
450 		localstat = IOERR_ABORT_REQUESTED;
451 		goto done;
452 	}
453 	/* Print completion message */
454 	switch (iostat) {
455 	case IOSTAT_SUCCESS:
456 		/* Build SCSI GOOD status */
457 		if (pkt->pkt_rsplen) {
458 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
459 		}
460 		break;
461 
462 	case IOSTAT_FCP_RSP_ERROR:
463 		break;
464 
465 	case IOSTAT_REMOTE_STOP:
466 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
467 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x",
468 		    did, sbp, scsi_opcode);
469 		break;
470 
471 	case IOSTAT_LOCAL_REJECT:
472 		localstat = cmd->un.grsp.perr.statLocalError;
473 
474 		switch (localstat) {
475 		case IOERR_SEQUENCE_TIMEOUT:
476 			EMLXS_MSGF(EMLXS_CONTEXT,
477 			    &emlxs_fcp_completion_error_msg,
478 			    "Local reject. %s did=0x%06x sbp=%p "
479 			    "cmd=%02x tmo=%d ",
480 			    emlxs_error_xlate(localstat), did, sbp,
481 			    scsi_opcode, pkt->pkt_timeout);
482 			break;
483 
484 		default:
485 			EMLXS_MSGF(EMLXS_CONTEXT,
486 			    &emlxs_fcp_completion_error_msg,
487 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
488 			    emlxs_error_xlate(localstat), did,
489 			    sbp, scsi_opcode);
490 		}
491 
492 		break;
493 
494 	case IOSTAT_NPORT_RJT:
495 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
496 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x",
497 		    did, sbp, scsi_opcode);
498 		break;
499 
500 	case IOSTAT_FABRIC_RJT:
501 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
502 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x",
503 		    did, sbp, scsi_opcode);
504 		break;
505 
506 	case IOSTAT_NPORT_BSY:
507 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
508 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x",
509 		    did, sbp, scsi_opcode);
510 		break;
511 
512 	case IOSTAT_FABRIC_BSY:
513 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
514 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x",
515 		    did, sbp, scsi_opcode);
516 		break;
517 
518 	case IOSTAT_INTERMED_RSP:
519 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
520 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x",
521 		    did, sbp, scsi_opcode);
522 		break;
523 
524 	case IOSTAT_LS_RJT:
525 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
526 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x",
527 		    did, sbp, scsi_opcode);
528 		break;
529 
530 	case IOSTAT_DATA_UNDERRUN:
531 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
532 		    "Underrun. did=0x%06x sbp=%p cmd=%02x dl=%d,%d rx=%d "
533 		    "rsp=%d (%02x,%02x,%02x,%02x)",
534 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl,
535 		    data_rx, rsp_data_resid, scsi_status, sense, asc, ascq);
536 		break;
537 
538 	case IOSTAT_DATA_OVERRUN:
539 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
540 		    "Overrun. did=0x%06x sbp=%p cmd=%02x dl=%d,%d rx=%d "
541 		    "rsp=%d (%02x,%02x,%02x,%02x)",
542 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl,
543 		    data_rx, rsp_data_resid, scsi_status, sense, asc, ascq);
544 		break;
545 
546 	default:
547 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
548 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
549 		    iostat, cmd->un.grsp.perr.statLocalError, did,
550 		    sbp, scsi_opcode);
551 		break;
552 	}
553 
554 done:
555 
556 	if (iostat == IOSTAT_SUCCESS) {
557 		HBASTATS.FcpGood++;
558 	} else {
559 		HBASTATS.FcpError++;
560 	}
561 
562 	mutex_exit(&sbp->mtx);
563 
564 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
565 
566 	return;
567 
568 } /* emlxs_handle_fcp_event() */
569 
570 
571 
572 /*
573  *  emlxs_post_buffer
574  *
575  *  This routine will post count buffers to the
576  *  ring with the QUE_RING_BUF_CN command. This
577  *  allows 2 buffers / command to be posted.
578  *  Returns the number of buffers NOT posted.
579  */
580 extern int
581 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
582 {
583 	emlxs_port_t *port = &PPORT;
584 	IOCB *icmd;
585 	IOCBQ *iocbq;
586 	MATCHMAP *mp;
587 	uint16_t tag;
588 	uint32_t maxqbuf;
589 	int32_t i;
590 	int32_t j;
591 	uint32_t seg;
592 	uint32_t size;
593 
594 	mp = 0;
595 	maxqbuf = 2;
596 	tag = (uint16_t)cnt;
597 	cnt += rp->fc_missbufcnt;
598 
599 	if (rp->ringno == FC_ELS_RING) {
600 		seg = MEM_BUF;
601 		size = MEM_ELSBUF_SIZE;
602 	} else if (rp->ringno == FC_IP_RING) {
603 		seg = MEM_IPBUF;
604 		size = MEM_IPBUF_SIZE;
605 	} else if (rp->ringno == FC_CT_RING) {
606 		seg = MEM_CTBUF;
607 		size = MEM_CTBUF_SIZE;
608 	}
609 #ifdef SFCT_SUPPORT
610 	else if (rp->ringno == FC_FCT_RING) {
611 		seg = MEM_FCTBUF;
612 		size = MEM_FCTBUF_SIZE;
613 	}
614 #endif	/* SFCT_SUPPORT */
615 	else {
616 		return (0);
617 	}
618 
619 	/*
620 	 * While there are buffers to post
621 	 */
622 	while (cnt) {
623 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
624 			rp->fc_missbufcnt = cnt;
625 			return (cnt);
626 		}
627 		iocbq->ring = (void *)rp;
628 		iocbq->port = (void *)port;
629 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
630 
631 		icmd = &iocbq->iocb;
632 
633 		/*
634 		 * Max buffers can be posted per command
635 		 */
636 		for (i = 0; i < maxqbuf; i++) {
637 			if (cnt <= 0)
638 				break;
639 
640 			/* fill in BDEs for command */
641 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg)) == 0) {
642 				uint32_t H;
643 				uint32_t L;
644 
645 				icmd->ulpBdeCount = i;
646 				for (j = 0; j < i; j++) {
647 					H = icmd->un.cont64[j].addrHigh;
648 					L = icmd->un.cont64[j].addrLow;
649 					mp = emlxs_mem_get_vaddr(hba, rp,
650 					    getPaddr(H, L));
651 					if (mp) {
652 						(void) emlxs_mem_put(hba, seg,
653 						    (uint8_t *)mp);
654 					}
655 				}
656 
657 				rp->fc_missbufcnt = cnt + i;
658 
659 				(void) emlxs_mem_put(hba, MEM_IOCB,
660 				    (uint8_t *)iocbq);
661 
662 				return (cnt + i);
663 			}
664 			/*
665 			 * map that page and save the address pair for lookup
666 			 * later
667 			 */
668 			emlxs_mem_map_vaddr(hba, rp, mp,
669 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
670 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
671 
672 			icmd->un.cont64[i].tus.f.bdeSize = size;
673 			icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
674 
675 /*
676  *			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
677  *			    "UB Post: ring=%d addr=%08x%08x size=%d",
678  *			    rp->ringno, icmd->un.cont64[i].addrHigh,
679  *			    icmd->un.cont64[i].addrLow, size);
680  */
681 
682 			cnt--;
683 		}
684 
685 		icmd->ulpIoTag = tag;
686 		icmd->ulpBdeCount = i;
687 		icmd->ulpLe = 1;
688 		icmd->ulpOwner = OWN_CHIP;
689 		iocbq->bp = (uint8_t *)mp;  /* used for delimiter between */
690 					    /* commands */
691 
692 		emlxs_issue_iocb_cmd(hba, rp, iocbq);
693 	}
694 
695 	rp->fc_missbufcnt = 0;
696 
697 	return (0);
698 
699 } /* emlxs_post_buffer() */
700 
701 
702 extern int
703 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
704 {
705 	emlxs_hba_t *hba = HBA;
706 	emlxs_config_t *cfg;
707 	NODELIST *nlp;
708 	fc_affected_id_t *aid;
709 	uint32_t mask;
710 	uint32_t aff_d_id;
711 	uint32_t linkdown;
712 	uint32_t vlinkdown;
713 	uint32_t action;
714 	int i;
715 	uint32_t unreg_vpi;
716 	uint32_t update;
717 	uint32_t adisc_support;
718 
719 	/* Target mode only uses this routine for linkdowns */
720 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
721 		return (0);
722 	}
723 	cfg = &CFG;
724 	aid = (fc_affected_id_t *)&scope;
725 	linkdown = 0;
726 	vlinkdown = 0;
727 	unreg_vpi = 0;
728 	update = 0;
729 
730 	if (!(port->flag & EMLXS_PORT_BOUND)) {
731 		return (0);
732 	}
733 	switch (aid->aff_format) {
734 	case 0:	/* Port */
735 		mask = 0x00ffffff;
736 		break;
737 
738 	case 1:	/* Area */
739 		mask = 0x00ffff00;
740 		break;
741 
742 	case 2:	/* Domain */
743 		mask = 0x00ff0000;
744 		break;
745 
746 	case 3:	/* Network */
747 		mask = 0x00000000;
748 		break;
749 
750 #ifdef DHCHAP_SUPPORT
751 	case 0xfe:	/* Virtual link down */
752 		mask = 0x00000000;
753 		vlinkdown = 1;
754 		break;
755 #endif	/* DHCHAP_SUPPORT */
756 
757 	case 0xff:	/* link is down */
758 		mask = 0x00000000;
759 		linkdown = 1;
760 		break;
761 
762 	}
763 
764 	aff_d_id = aid->aff_d_id & mask;
765 
766 
767 	/* If link is down then this is a hard shutdown and flush */
768 	/*
769 	 * If link not down then this is a soft shutdown and flush (e.g.
770 	 * RSCN)
771 	 */
772 	if (linkdown) {
773 		mutex_enter(&EMLXS_PORT_LOCK);
774 
775 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
776 		port->prev_did = port->did;
777 		port->did = 0;
778 
779 		if (port->ulp_statec != FC_STATE_OFFLINE) {
780 			port->ulp_statec = FC_STATE_OFFLINE;
781 			update = 1;
782 		}
783 		mutex_exit(&EMLXS_PORT_LOCK);
784 
785 		/* Tell ULP about it */
786 		if (update) {
787 			if (port->flag & EMLXS_PORT_BOUND) {
788 				if (port->vpi == 0) {
789 					EMLXS_MSGF(EMLXS_CONTEXT,
790 					    &emlxs_link_down_msg,
791 					    NULL);
792 				}
793 #ifdef SFCT_SUPPORT
794 				if (port->tgt_mode) {
795 					emlxs_fct_link_down(port);
796 
797 				} else if (port->ini_mode) {
798 					port->ulp_statec_cb(port->ulp_handle,
799 					    FC_STATE_OFFLINE);
800 				}
801 #else
802 				port->ulp_statec_cb(port->ulp_handle,
803 				    FC_STATE_OFFLINE);
804 #endif	/* SFCT_SUPPORT */
805 			} else {
806 				if (port->vpi == 0) {
807 					EMLXS_MSGF(EMLXS_CONTEXT,
808 					    &emlxs_link_down_msg,
809 					    "*");
810 				}
811 			}
812 
813 
814 		}
815 		unreg_vpi = 1;
816 
817 #ifdef DHCHAP_SUPPORT
818 		/* Stop authentication with all nodes */
819 		emlxs_dhc_auth_stop(port, NULL);
820 #endif	/* DHCHAP_SUPPORT */
821 
822 		/* Flush the base node */
823 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
824 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
825 
826 		/* Flush any pending ub buffers */
827 		emlxs_ub_flush(port);
828 	}
829 #ifdef DHCHAP_SUPPORT
830 	/* virtual link down */
831 	else if (vlinkdown) {
832 		mutex_enter(&EMLXS_PORT_LOCK);
833 
834 		if (port->ulp_statec != FC_STATE_OFFLINE) {
835 			port->ulp_statec = FC_STATE_OFFLINE;
836 			update = 1;
837 		}
838 		mutex_exit(&EMLXS_PORT_LOCK);
839 
840 		/* Tell ULP about it */
841 		if (update) {
842 			if (port->flag & EMLXS_PORT_BOUND) {
843 				if (port->vpi == 0) {
844 					EMLXS_MSGF(EMLXS_CONTEXT,
845 					    &emlxs_link_down_msg,
846 					    "Switch authentication failed.");
847 				}
848 #ifdef SFCT_SUPPORT
849 				if (port->tgt_mode) {
850 					emlxs_fct_link_down(port);
851 				} else if (port->ini_mode) {
852 					port->ulp_statec_cb(port->ulp_handle,
853 					    FC_STATE_OFFLINE);
854 				}
855 #else
856 				port->ulp_statec_cb(port->ulp_handle,
857 				    FC_STATE_OFFLINE);
858 #endif	/* SFCT_SUPPORT */
859 			} else {
860 				if (port->vpi == 0) {
861 					EMLXS_MSGF(EMLXS_CONTEXT,
862 					    &emlxs_link_down_msg,
863 					    "Switch authentication failed. *");
864 				}
865 			}
866 
867 
868 		}
869 		/* Flush the base node */
870 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
871 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
872 	}
873 #endif	/* DHCHAP_SUPPORT */
874 
875 	if (port->tgt_mode) {
876 		goto done;
877 	}
878 	/* Set the node tags */
879 	/* We will process all nodes with this tag */
880 	rw_enter(&port->node_rwlock, RW_READER);
881 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
882 		nlp = port->node_table[i];
883 		while (nlp != NULL) {
884 			nlp->nlp_tag = 1;
885 			nlp = nlp->nlp_list_next;
886 		}
887 	}
888 	rw_exit(&port->node_rwlock);
889 
890 	if (hba->flag & FC_ONLINE_MODE) {
891 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
892 	} else {
893 		adisc_support = 0;
894 	}
895 
896 	/* Check ADISC support level */
897 	switch (adisc_support) {
898 	case 0:	/* No support - Flush all IO to all matching nodes */
899 
900 		for (; ; ) {
901 			/*
902 			 * We need to hold the locks this way because
903 			 * emlxs_mb_unreg_did and the flush routines enter
904 			 * the same locks. Also, when we release the lock the
905 			 * list can change out from under us.
906 			 */
907 
908 			/* Find first node */
909 			rw_enter(&port->node_rwlock, RW_READER);
910 			action = 0;
911 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
912 				nlp = port->node_table[i];
913 				while (nlp != NULL) {
914 					if (!nlp->nlp_tag) {
915 						nlp = nlp->nlp_list_next;
916 						continue;
917 					}
918 					nlp->nlp_tag = 0;
919 
920 					/*
921 					 * Check for any device that matches
922 					 * our mask
923 					 */
924 					if ((nlp->nlp_DID & mask) == aff_d_id) {
925 						if (linkdown) {
926 							action = 1;
927 							break;
928 						} else {  /* Must be an RCSN */
929 							action = 2;
930 							break;
931 						}
932 					}
933 					nlp = nlp->nlp_list_next;
934 				}
935 
936 				if (action) {
937 					break;
938 				}
939 			}
940 			rw_exit(&port->node_rwlock);
941 
942 
943 			/* Check if nothing was found */
944 			if (action == 0) {
945 				break;
946 			} else if (action == 1) {
947 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
948 				    NULL, NULL, NULL);
949 			} else if (action == 2) {
950 #ifdef DHCHAP_SUPPORT
951 				emlxs_dhc_auth_stop(port, nlp);
952 #endif	/* DHCHAP_SUPPORT */
953 
954 				/* Close the node for any further normal IO */
955 				/* A PLOGI with reopen the node */
956 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
957 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
958 
959 				/* Flush tx queue */
960 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
961 
962 				/* Flush chip queue */
963 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
964 			}
965 		}
966 
967 		break;
968 
969 	case 1:	/* Partial support - Flush IO for non-FCP2 matching * nodes */
970 
971 		for (;;) {
972 
973 			/*
974 			 * We need to hold the locks this way because
975 			 * emlxs_mb_unreg_did and the flush routines enter
976 			 * the same locks. Also, when we release the lock the
977 			 * list can change out from under us.
978 			 */
979 			rw_enter(&port->node_rwlock, RW_READER);
980 			action = 0;
981 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
982 				nlp = port->node_table[i];
983 				while (nlp != NULL) {
984 					if (!nlp->nlp_tag) {
985 						nlp = nlp->nlp_list_next;
986 						continue;
987 					}
988 					nlp->nlp_tag = 0;
989 
990 					/*
991 					 * Check for special FCP2 target
992 					 * device that matches our mask
993 					 */
994 					if ((nlp->nlp_fcp_info &
995 					    NLP_FCP_TGT_DEVICE) &&
996 					    (nlp->nlp_fcp_info &
997 					    NLP_FCP_2_DEVICE) &&
998 					    (nlp->nlp_DID & mask) == aff_d_id) {
999 						action = 3;
1000 						break;
1001 					}
1002 					/*
1003 					 * Check for any other device that
1004 					 * matches our mask
1005 					 */
1006 					else if ((nlp->nlp_DID & mask) ==
1007 					    aff_d_id) {
1008 						if (linkdown) {
1009 							action = 1;
1010 							break;
1011 						} else {   /* Must be an RSCN */
1012 							action = 2;
1013 							break;
1014 						}
1015 					}
1016 					nlp = nlp->nlp_list_next;
1017 				}
1018 
1019 				if (action) {
1020 					break;
1021 				}
1022 			}
1023 			rw_exit(&port->node_rwlock);
1024 
1025 			/* Check if nothing was found */
1026 			if (action == 0) {
1027 				break;
1028 			} else if (action == 1) {
1029 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1030 				    NULL, NULL, NULL);
1031 			} else if (action == 2) {
1032 #ifdef DHCHAP_SUPPORT
1033 				emlxs_dhc_auth_stop(port, nlp);
1034 #endif	/* DHCHAP_SUPPORT */
1035 
1036 				/* Close the node for any further normal IO */
1037 				/* A PLOGI with reopen the node */
1038 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1039 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1040 
1041 				/* Flush tx queue */
1042 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1043 
1044 				/* Flush chip queue */
1045 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1046 			} else if (action == 3) {	/* FCP2 devices */
1047 				unreg_vpi = 0;
1048 
1049 #ifdef DHCHAP_SUPPORT
1050 				emlxs_dhc_auth_stop(port, nlp);
1051 #endif	/* DHCHAP_SUPPORT */
1052 
1053 				/* Close the node for any further normal IO */
1054 				/* An ADISC or a PLOGI with reopen the node */
1055 				emlxs_node_close(port, nlp, FC_FCP_RING,
1056 				    ((linkdown) ? 0 : 60));
1057 				emlxs_node_close(port, nlp, FC_IP_RING,
1058 				    ((linkdown) ? 0 : 60));
1059 
1060 				/* Flush tx queues except for FCP ring */
1061 				(void) emlxs_tx_node_flush(port, nlp,
1062 				    &hba->ring[FC_CT_RING], 0, 0);
1063 				(void) emlxs_tx_node_flush(port, nlp,
1064 				    &hba->ring[FC_ELS_RING], 0, 0);
1065 				(void) emlxs_tx_node_flush(port, nlp,
1066 				    &hba->ring[FC_IP_RING], 0, 0);
1067 
1068 				/* Clear IP XRI */
1069 				nlp->nlp_Xri = 0;
1070 
1071 				/* Flush chip queues except for FCP ring */
1072 				(void) emlxs_chipq_node_flush(port,
1073 				    &hba->ring[FC_CT_RING], nlp, 0);
1074 				(void) emlxs_chipq_node_flush(port,
1075 				    &hba->ring[FC_ELS_RING], nlp, 0);
1076 				(void) emlxs_chipq_node_flush(port,
1077 				    &hba->ring[FC_IP_RING], nlp, 0);
1078 			}
1079 		}
1080 		break;
1081 
1082 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1083 
1084 		if (!linkdown && !vlinkdown) {
1085 			break;
1086 		}
1087 		for (;;) {
1088 			/*
1089 			 * We need to hold the locks this way because
1090 			 * emlxs_mb_unreg_did and the flush routines enter
1091 			 * the same locks. Also, when we release the lock the
1092 			 * list can change out from under us.
1093 			 */
1094 			rw_enter(&port->node_rwlock, RW_READER);
1095 			action = 0;
1096 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1097 				nlp = port->node_table[i];
1098 				while (nlp != NULL) {
1099 					if (!nlp->nlp_tag) {
1100 						nlp = nlp->nlp_list_next;
1101 						continue;
1102 					}
1103 					nlp->nlp_tag = 0;
1104 
1105 					/*
1106 					 * Check for FCP target device that
1107 					 * matches our mask
1108 					 */
1109 					if ((nlp->nlp_fcp_info &
1110 					    NLP_FCP_TGT_DEVICE) &&
1111 					    (nlp->nlp_DID & mask) == aff_d_id) {
1112 						action = 3;
1113 						break;
1114 					}
1115 					/*
1116 					 * Check for any other device that
1117 					 * matches our mask
1118 					 */
1119 					else if ((nlp->nlp_DID & mask) ==
1120 					    aff_d_id) {
1121 						if (linkdown) {
1122 							action = 1;
1123 							break;
1124 						} else { /* Must be an RSCN */
1125 							action = 2;
1126 							break;
1127 						}
1128 					}
1129 					nlp = nlp->nlp_list_next;
1130 				}
1131 				if (action) {
1132 					break;
1133 				}
1134 			}
1135 			rw_exit(&port->node_rwlock);
1136 
1137 			/* Check if nothing was found */
1138 			if (action == 0) {
1139 				break;
1140 			} else if (action == 1) {
1141 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1142 				    NULL, NULL, NULL);
1143 			} else if (action == 2) {
1144 				/* Close the node for any further normal IO */
				/* A PLOGI will reopen the node */
1146 				emlxs_node_close(port, nlp, FC_FCP_RING, 60);
1147 				emlxs_node_close(port, nlp, FC_IP_RING, 60);
1148 
1149 				/* Flush tx queue */
1150 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1151 
1152 				/* Flush chip queue */
1153 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1154 
1155 			} else if (action == 3) {	/* FCP2 devices */
1156 				unreg_vpi = 0;
1157 
1158 				/* Close the node for any further normal IO */
				/* An ADISC or a PLOGI will reopen the node */
1160 				emlxs_node_close(port, nlp, FC_FCP_RING,
1161 				    ((linkdown) ? 0 : 60));
1162 				emlxs_node_close(port, nlp, FC_IP_RING,
1163 				    ((linkdown) ? 0 : 60));
1164 
1165 				/* Flush tx queues except for FCP ring */
1166 				(void) emlxs_tx_node_flush(port, nlp,
1167 				    &hba->ring[FC_CT_RING], 0, 0);
1168 				(void) emlxs_tx_node_flush(port, nlp,
1169 				    &hba->ring[FC_ELS_RING], 0, 0);
1170 				(void) emlxs_tx_node_flush(port, nlp,
1171 				    &hba->ring[FC_IP_RING], 0, 0);
1172 
1173 				/* Clear IP XRI */
1174 				nlp->nlp_Xri = 0;
1175 
1176 				/* Flush chip queues except for FCP ring */
1177 				(void) emlxs_chipq_node_flush(port,
1178 				    &hba->ring[FC_CT_RING], nlp, 0);
1179 				(void) emlxs_chipq_node_flush(port,
1180 				    &hba->ring[FC_ELS_RING], nlp, 0);
1181 				(void) emlxs_chipq_node_flush(port,
1182 				    &hba->ring[FC_IP_RING], nlp, 0);
1183 			}
1184 		}
1185 
1186 		break;
1187 
1188 
1189 	}	/* switch() */
1190 
1191 done:
1192 
1193 	if (unreg_vpi) {
1194 		(void) emlxs_mb_unreg_vpi(port);
1195 	}
1196 	return (0);
1197 
1198 } /* emlxs_port_offline() */
1199 
1200 
1201 
/*
 * emlxs_port_online
 *
 * Deliver a link-up state change for the given (physical or virtual)
 * port to its ULP.  The port must be bound and enabled, and a virtual
 * port (vpi > 0) additionally requires NPIV to be both enabled and
 * supported.  When the state actually changes, the ULP callback is
 * invoked, any threads waiting on EMLXS_LINKUP_CV are woken, and
 * pending unsolicited buffers are flushed.
 */
extern void
emlxs_port_online(emlxs_port_t *vport)
{
	emlxs_hba_t *hba = vport->hba;
	emlxs_port_t *port = &PPORT;
	uint32_t state;
	uint32_t update;
	uint32_t npiv_linkup;
	char topology[32];
	char linkspeed[32];
	char mode[32];

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "linkup_callback.
	 * vpi=%d fc_flag=%x", vport->vpi, hba->flag);
	 */

	/* Virtual ports require NPIV to be enabled and supported */
	if ((vport->vpi > 0) &&
	    (!(hba->flag & FC_NPIV_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED))) {
		return;
	}
	/* Ignore ports that are not bound and enabled */
	if (!(vport->flag & EMLXS_PORT_BOUND) ||
	    !(vport->flag & EMLXS_PORT_ENABLE)) {
		return;
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for mode (string is used in the link-up message below) */
	if (port->tgt_mode) {
		(void) strcpy(mode, ", target");
	} else if (port->ini_mode) {
		(void) strcpy(mode, ", initiator");
	} else {
		(void) strcpy(mode, "");
	}

	/* Check for loop topology */
	if (hba->topology == TOPOLOGY_LOOP) {
		state = FC_STATE_LOOP;
		(void) strcpy(topology, ", loop");
	} else {
		state = FC_STATE_ONLINE;
		(void) strcpy(topology, ", fabric");
	}

	/* Set the link speed (folded into the ULP state word) */
	switch (hba->linkspeed) {
	case 0:
		/* No speed reported; treated as 1Gb for the ULP state */
		(void) strcpy(linkspeed, "Gb");
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_1GHZ_LINK:
		(void) strcpy(linkspeed, "1Gb");
		state |= FC_STATE_1GBIT_SPEED;
		break;
	case LA_2GHZ_LINK:
		(void) strcpy(linkspeed, "2Gb");
		state |= FC_STATE_2GBIT_SPEED;
		break;
	case LA_4GHZ_LINK:
		(void) strcpy(linkspeed, "4Gb");
		state |= FC_STATE_4GBIT_SPEED;
		break;
	case LA_8GHZ_LINK:
		(void) strcpy(linkspeed, "8Gb");
		state |= FC_STATE_8GBIT_SPEED;
		break;
	case LA_10GHZ_LINK:
		(void) strcpy(linkspeed, "10Gb");
		state |= FC_STATE_10GBIT_SPEED;
		break;
	default:
		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
		break;
	}

	npiv_linkup = 0;
	update = 0;

	/*
	 * Only notify the ULP when the link is really up, not in
	 * loopback, and the reported state actually changed.
	 */
	if ((hba->state >= FC_LINK_UP) &&
	    !(hba->flag & FC_LOOPBACK_MODE) &&
	    (vport->ulp_statec != state)) {
		update = 1;
		vport->ulp_statec = state;

		/* First virtual port to come up records the NPIV linkup */
		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			npiv_linkup = 1;
		}
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg, "linkup_callback:
	 * update=%d vpi=%d flag=%d fc_flag=%x state=%x statec=%x", update,
	 * vport->vpi, npiv_linkup, hba->flag, hba->state,
	 * vport->ulp_statec);
	 */
	if (update) {
		if (vport->flag & EMLXS_PORT_BOUND) {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s",
				    linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg,
				    "%s%s%s",
				    linkspeed, topology, mode);
			}
#ifdef SFCT_SUPPORT
			/* Target ports notify FCT; initiators the ULP */
			if (vport->tgt_mode) {
				emlxs_fct_link_up(vport);
			} else if (vport->ini_mode) {
				vport->ulp_statec_cb(vport->ulp_handle, state);
			}
#else
			vport->ulp_statec_cb(vport->ulp_handle, state);
#endif	/* SFCT_SUPPORT */
		} else {
			/* Unbound port: log only (note the trailing '*') */
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s *",
				    linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg,
				    "%s%s%s *",
				    linkspeed, topology, mode);
			}
		}

		/* Check for waiting threads (physical port only) */
		if (vport->vpi == 0) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			if (hba->linkup_wait_flag == TRUE) {
				hba->linkup_wait_flag = FALSE;
				cv_broadcast(&EMLXS_LINKUP_CV);
			}
			mutex_exit(&EMLXS_LINKUP_LOCK);
		}
		/* Flush any pending ub buffers */
		emlxs_ub_flush(vport);
	}
	return;

} /* emlxs_port_online() */
1351 
1352 
1353 extern void
1354 emlxs_linkdown(emlxs_hba_t *hba)
1355 {
1356 	emlxs_port_t *port = &PPORT;
1357 	int i;
1358 
1359 	mutex_enter(&EMLXS_PORT_LOCK);
1360 
1361 	HBASTATS.LinkDown++;
1362 	emlxs_ffstate_change_locked(hba, FC_LINK_DOWN);
1363 
1364 	/* Filter hba flags */
1365 	hba->flag &= FC_LINKDOWN_MASK;
1366 	hba->discovery_timer = 0;
1367 	hba->linkup_timer = 0;
1368 
1369 	mutex_exit(&EMLXS_PORT_LOCK);
1370 
1371 	for (i = 0; i < MAX_VPORTS; i++) {
1372 		port = &VPORT(i);
1373 
1374 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1375 			continue;
1376 		}
1377 		(void) emlxs_port_offline(port, 0xffffffff);
1378 
1379 	}
1380 
1381 	return;
1382 
1383 } /* emlxs_linkdown() */
1384 
1385 
/*
 * emlxs_linkup
 *
 * Record a link-up event: bump the stats counter, move the HBA state
 * machine to FC_LINK_UP and arm the linkup and discovery watchdog
 * timers.  In Menlo mode only the linkup CV is signaled and no timers
 * are started.
 */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	HBASTATS.LinkUp++;
	emlxs_ffstate_change_locked(hba, FC_LINK_UP);

#ifdef MENLO_TEST
	/* Hornet adapters may be forced into Menlo mode via config */
	if ((hba->model_info.device_id == PCI_DEVICE_ID_LP21000_M) &&
	    (cfg[CFG_HORNET_FLOGI].current == 0)) {
		hba->flag |= FC_MENLO_MODE;
	}
#endif	/* MENLO_TEST */

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		return;
	}
#endif	/* MENLO_SUPPORT */

	/* Set the linkup & discovery timers (absolute, in timer tics) */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer = hba->timer_tics +
	    cfg[CFG_LINKUP_TIMEOUT].current + cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_linkup() */
1430 
1431 
1432 /*
1433  *  emlxs_reset_link
1434  *
1435  *  Description:
1436  *  Called to reset the link with an init_link
1437  *
1438  *    Returns:
1439  *
1440  */
1441 extern int
1442 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup)
1443 {
1444 	emlxs_port_t *port = &PPORT;
1445 	emlxs_config_t *cfg;
1446 	MAILBOX *mb;
1447 
1448 	/*
1449 	 * Get a buffer to use for the mailbox command
1450 	 */
1451 	if ((mb = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX | MEM_PRI)) == NULL) {
1452 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1453 		    "Unable to allocate mailbox buffer.");
1454 
1455 		return (1);
1456 	}
1457 	cfg = &CFG;
1458 
1459 	if (linkup) {
1460 		/*
1461 		 * Setup and issue mailbox INITIALIZE LINK command
1462 		 */
1463 
1464 		emlxs_mb_init_link(hba, (MAILBOX *)mb,
1465 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1466 
1467 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1468 
1469 		/* Clear the loopback mode */
1470 		mutex_enter(&EMLXS_PORT_LOCK);
1471 		hba->flag &= ~FC_LOOPBACK_MODE;
1472 		mutex_exit(&EMLXS_PORT_LOCK);
1473 
1474 		if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mb,
1475 		    MBX_NOWAIT, 0) != MBX_BUSY) {
1476 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1477 		}
1478 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1479 
1480 	} else {	/* hold link down */
1481 		emlxs_mb_down_link(hba, (MAILBOX *)mb);
1482 
1483 		if (emlxs_mb_issue_cmd(hba, (MAILBOX *)mb,
1484 		    MBX_NOWAIT, 0) != MBX_BUSY) {
1485 			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
1486 		}
1487 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1488 		    "Disabling link...");
1489 	}
1490 
1491 	return (0);
1492 
1493 } /* emlxs_reset_link() */
1494 
1495 
1496 extern int
1497 emlxs_online(emlxs_hba_t *hba)
1498 {
1499 	emlxs_port_t *port = &PPORT;
1500 	int32_t rval = 0;
1501 	uint32_t i = 0;
1502 
1503 	/* Make sure adapter is offline or exit trying (30 seconds) */
1504 	for (; ; ) {
1505 		/* Check if adapter is already going online */
1506 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1507 			return (0);
1508 		}
1509 		mutex_enter(&EMLXS_PORT_LOCK);
1510 
1511 		/* Check again */
1512 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1513 			mutex_exit(&EMLXS_PORT_LOCK);
1514 			return (0);
1515 		}
1516 		/* Check if adapter is offline */
1517 		if (hba->flag & FC_OFFLINE_MODE) {
1518 			/* Mark it going online */
1519 			hba->flag &= ~FC_OFFLINE_MODE;
1520 			hba->flag |= FC_ONLINING_MODE;
1521 
1522 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1523 			mutex_exit(&EMLXS_PORT_LOCK);
1524 			break;
1525 		}
1526 		mutex_exit(&EMLXS_PORT_LOCK);
1527 
1528 		if (i++ > 30) {
1529 			/* Return on timeout */
1530 			return (1);
1531 		}
1532 		DELAYMS(1000);
1533 	}
1534 
1535 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1536 	    "Going online...");
1537 
1538 	if (hba->bus_type == SBUS_FC) {
1539 		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba,
1540 		    hba->sbus_csr_addr));
1541 	}
1542 	if (rval = emlxs_ffinit(hba)) {
1543 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1544 		    "status=%x",
1545 		    rval);
1546 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1547 
1548 		/* Set FC_OFFLINE_MODE */
1549 		mutex_enter(&EMLXS_PORT_LOCK);
1550 		emlxs_diag_state = DDI_OFFDI;
1551 		hba->flag |= FC_OFFLINE_MODE;
1552 		hba->flag &= ~FC_ONLINING_MODE;
1553 		mutex_exit(&EMLXS_PORT_LOCK);
1554 
1555 		return (rval);
1556 	}
1557 	/* Start the timer */
1558 	emlxs_timer_start(hba);
1559 
1560 	/* Set FC_ONLINE_MODE */
1561 	mutex_enter(&EMLXS_PORT_LOCK);
1562 	emlxs_diag_state = DDI_ONDI;
1563 	hba->flag |= FC_ONLINE_MODE;
1564 	hba->flag &= ~FC_ONLINING_MODE;
1565 	mutex_exit(&EMLXS_PORT_LOCK);
1566 
1567 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1568 
1569 #ifdef SFCT_SUPPORT
1570 	(void) emlxs_fct_port_initialize(port);
1571 #endif	/* SFCT_SUPPORT */
1572 
1573 	return (rval);
1574 
1575 } /* emlxs_online() */
1576 
1577 
1578 extern int
1579 emlxs_offline(emlxs_hba_t *hba)
1580 {
1581 	emlxs_port_t *port = &PPORT;
1582 	uint32_t i = 0;
1583 	int rval = 1;
1584 
1585 	/* Make sure adapter is online or exit trying (30 seconds) */
1586 	for (; ; ) {
1587 		/* Check if adapter is already going offline */
1588 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1589 			return (0);
1590 		}
1591 		mutex_enter(&EMLXS_PORT_LOCK);
1592 
1593 		/* Check again */
1594 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1595 			mutex_exit(&EMLXS_PORT_LOCK);
1596 			return (0);
1597 		}
1598 		/* Check if adapter is online */
1599 		if (hba->flag & FC_ONLINE_MODE) {
1600 			/* Mark it going offline */
1601 			hba->flag &= ~FC_ONLINE_MODE;
1602 			hba->flag |= FC_OFFLINING_MODE;
1603 
1604 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1605 			mutex_exit(&EMLXS_PORT_LOCK);
1606 			break;
1607 		}
1608 		mutex_exit(&EMLXS_PORT_LOCK);
1609 
1610 		if (i++ > 30) {
1611 			/* Return on timeout */
1612 			return (1);
1613 		}
1614 		DELAYMS(1000);
1615 	}
1616 
1617 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg, "Going offline...");
1618 
1619 	if (port->ini_mode) {
1620 		/* Flush all IO */
1621 		emlxs_linkdown(hba);
1622 
1623 	}
1624 #ifdef SFCT_SUPPORT
1625 	else {
1626 		(void) emlxs_fct_port_shutdown(port);
1627 	}
1628 #endif	/* SFCT_SUPPORT */
1629 
1630 	/* Check if adapter was shutdown */
1631 	if (hba->flag & FC_HARDWARE_ERROR) {
1632 		/* Force mailbox cleanup */
1633 		/* This will wake any sleeping or polling threads */
1634 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1635 	}
1636 	/* Pause here for the IO to settle */
1637 	delay(drv_usectohz(1000000));	/* 1 sec */
1638 
1639 	/* Unregister all nodes */
1640 	emlxs_ffcleanup(hba);
1641 
1642 
1643 	if (hba->bus_type == SBUS_FC) {
1644 		WRITE_SBUS_CSR_REG(hba,
1645 		    FC_SHS_REG(hba, hba->sbus_csr_addr), 0x9A);
1646 	}
1647 	/* Stop the timer */
1648 	emlxs_timer_stop(hba);
1649 
1650 	/* For safety flush every iotag list */
1651 	if (emlxs_iotag_flush(hba)) {
1652 		/* Pause here for the IO to flush */
1653 		DELAYMS(1000);
1654 	}
1655 	/* Interlock the adapter to take it down */
1656 	(void) emlxs_interlock(hba);
1657 
1658 	/* Free all the shared memory */
1659 	(void) emlxs_mem_free_buffer(hba);
1660 
1661 	mutex_enter(&EMLXS_PORT_LOCK);
1662 	hba->flag |= FC_OFFLINE_MODE;
1663 	hba->flag &= ~FC_OFFLINING_MODE;
1664 	emlxs_diag_state = DDI_OFFDI;
1665 	mutex_exit(&EMLXS_PORT_LOCK);
1666 
1667 	rval = 0;
1668 
1669 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1670 
1671 done:
1672 
1673 	return (rval);
1674 
1675 } /* emlxs_offline() */
1676 
1677 
1678 
1679 extern int
1680 emlxs_power_down(emlxs_hba_t *hba)
1681 {
1682 	int32_t rval = 0;
1683 	uint32_t *ptr;
1684 	uint32_t i;
1685 
1686 	if ((rval = emlxs_offline(hba))) {
1687 		return (rval);
1688 	}
1689 	/* Save pci config space */
1690 	ptr = (uint32_t *)hba->pm_config;
1691 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1692 		*ptr = ddi_get32(hba->pci_acc_handle,
1693 		    (uint32_t *)(hba->pci_addr + i));
1694 	}
1695 
1696 	/* Put chip in D3 state */
1697 	(void) ddi_put8(hba->pci_acc_handle,
1698 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1699 	    (uint8_t)PCI_PM_D3_STATE);
1700 
1701 	return (0);
1702 
1703 } /* End emlxs_power_down */
1704 
1705 
1706 extern int
1707 emlxs_power_up(emlxs_hba_t *hba)
1708 {
1709 	int32_t rval = 0;
1710 	uint32_t *ptr;
1711 	uint32_t i;
1712 
1713 
1714 	/* Take chip out of D3 state */
1715 	(void) ddi_put8(hba->pci_acc_handle,
1716 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1717 	    (uint8_t)PCI_PM_D0_STATE);
1718 
1719 	/* Must have at least 10 ms delay here */
1720 	DELAYMS(100);
1721 
1722 	/* Restore pci config space */
1723 	ptr = (uint32_t *)hba->pm_config;
1724 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1725 		(void) ddi_put32(hba->pci_acc_handle,
1726 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1727 	}
1728 
1729 	/* Bring adapter online */
1730 	if ((rval = emlxs_online(hba))) {
1731 		(void) ddi_put8(hba->pci_acc_handle,
1732 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1733 		    (uint8_t)PCI_PM_D3_STATE);
1734 
1735 		return (rval);
1736 	}
1737 	return (rval);
1738 
1739 } /* End emlxs_power_up */
1740 
1741 
1742 /*
1743  * NAME:     emlxs_ffcleanup
1744  *
1745  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1746  *
1747  * EXECUTION ENVIRONMENT: process only
1748  *
1749  * CALLED FROM: CFG_TERM
1750  *
1751  * INPUT: hba       - pointer to the dev_ctl area.
1752  *
1753  * RETURNS: none
1754  */
1755 extern void
1756 emlxs_ffcleanup(emlxs_hba_t *hba)
1757 {
1758 	emlxs_port_t *port = &PPORT;
1759 	uint32_t j;
1760 
1761 	/* Disable all but the mailbox interrupt */
1762 	hba->hc_copy = HC_MBINT_ENA;
1763 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1764 
1765 	/* Make sure all port nodes are destroyed */
1766 	for (j = 0; j < MAX_VPORTS; j++) {
1767 		port = &VPORT(j);
1768 
1769 		if (port->node_count) {
1770 			(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0, 0);
1771 		}
1772 	}
1773 
1774 	/* Clear all interrupt enable conditions */
1775 	hba->hc_copy = 0;
1776 	WRITE_CSR_REG(hba, FC_HC_REG(hba, hba->csr_addr), hba->hc_copy);
1777 
1778 	return;
1779 
1780 } /* emlxs_ffcleanup() */
1781 
1782 
1783 extern uint16_t
1784 emlxs_register_pkt(RING *rp, emlxs_buf_t *sbp)
1785 {
1786 	emlxs_hba_t *hba;
1787 	emlxs_port_t *port;
1788 	uint16_t iotag;
1789 	uint32_t i;
1790 
1791 	hba = rp->hba;
1792 
1793 	mutex_enter(&EMLXS_FCTAB_LOCK(rp->ringno));
1794 
1795 	if (sbp->iotag != 0) {
1796 		port = &PPORT;
1797 
1798 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1799 		    "Pkt already registered! ringo=%d iotag=%d sbp=%p",
1800 		    sbp->ring, sbp->iotag, sbp);
1801 	}
1802 	iotag = 0;
1803 	for (i = 0; i < rp->max_iotag; i++) {
1804 		if (!rp->fc_iotag || rp->fc_iotag >= rp->max_iotag) {
1805 			rp->fc_iotag = 1;
1806 		}
1807 		iotag = rp->fc_iotag++;
1808 
1809 		if (rp->fc_table[iotag] == 0 ||
1810 		    rp->fc_table[iotag] == STALE_PACKET) {
1811 			hba->io_count[rp->ringno]++;
1812 			rp->fc_table[iotag] = sbp;
1813 
1814 			sbp->iotag = iotag;
1815 			sbp->ring = rp;
1816 
1817 			break;
1818 		}
1819 		iotag = 0;
1820 	}
1821 
1822 	mutex_exit(&EMLXS_FCTAB_LOCK(rp->ringno));
1823 
1824 	/*
1825 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1826 	 * "emlxs_register_pkt: ringo=%d iotag=%d sbp=%p", rp->ringno, iotag,
1827 	 * sbp);
1828 	 */
1829 
1830 	return (iotag);
1831 
1832 } /* emlxs_register_pkt() */
1833 
1834 
1835 
/*
 * emlxs_unregister_pkt
 *
 * Remove the packet registered under 'iotag' from the ring's fc_table
 * and release its resources (txq accounting, chipq flag, bmp buffer).
 * If 'forced' is set, the table slot is poisoned with STALE_PACKET
 * instead of being cleared.  Returns the sbp that was registered, or
 * NULL/STALE_PACKET if the iotag was out of range or the slot was
 * empty/stale.
 */
extern emlxs_buf_t *
emlxs_unregister_pkt(RING *rp, uint16_t iotag, uint32_t forced)
{
	emlxs_hba_t *hba;
	emlxs_buf_t *sbp;
	uint32_t ringno;

	/* Check the iotag range (iotag 0 is never assigned) */
	if ((iotag == 0) || (iotag >= rp->max_iotag)) {
		return (NULL);
	}
	sbp = NULL;
	hba = rp->hba;
	ringno = rp->ringno;

	/* Remove the sbp from the table */
	mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
	sbp = rp->fc_table[iotag];

	if (!sbp || (sbp == STALE_PACKET)) {
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
		return (sbp);
	}
	/* A forced unregister leaves a stale marker in the slot */
	rp->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
	hba->io_count[ringno]--;
	sbp->iotag = 0;

	mutex_exit(&EMLXS_FCTAB_LOCK(ringno));


	/* Clean up the sbp under its own lock */
	mutex_enter(&sbp->mtx);

	/* Drop the txq accounting if the pkt was still on the tx queue */
	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		sbp->pkt_flags &= ~PACKET_IN_TXQ;
		hba->ring_tx_count[ringno]--;
	}
	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
	}
	/* Release the buffer pointer list, if one was attached */
	if (sbp->bmp) {
		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
		sbp->bmp = 0;
	}
	mutex_exit(&sbp->mtx);


	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "emlxs_unregister_pkt: ringo=%d iotag=%d sbp=%p", rp->ringno,
	 * iotag, sbp);
	 */

	return (sbp);

} /* emlxs_unregister_pkt() */
1892 
1893 
1894 
1895 /* Flush all IO's to all nodes for a given ring */
1896 extern uint32_t
1897 emlxs_tx_ring_flush(emlxs_hba_t *hba, RING *rp, emlxs_buf_t *fpkt)
1898 {
1899 	emlxs_port_t *port = &PPORT;
1900 	emlxs_buf_t *sbp;
1901 	IOCBQ *iocbq;
1902 	IOCBQ *next;
1903 	IOCB *iocb;
1904 	uint32_t ringno;
1905 	Q abort;
1906 	NODELIST *ndlp;
1907 	IOCB *icmd;
1908 	MATCHMAP *mp;
1909 	uint32_t i;
1910 
1911 	ringno = rp->ringno;
1912 	bzero((void *)&abort, sizeof (Q));
1913 
1914 	mutex_enter(&EMLXS_RINGTX_LOCK);
1915 
1916 	/* While a node needs servicing */
1917 	while (rp->nodeq.q_first) {
1918 		ndlp = (NODELIST *)rp->nodeq.q_first;
1919 
1920 		/* Check if priority queue is not empty */
1921 		if (ndlp->nlp_ptx[ringno].q_first) {
1922 			/* Transfer all iocb's to local queue */
1923 			if (abort.q_first == 0) {
1924 				abort.q_first = ndlp->nlp_ptx[ringno].q_first;
1925 				abort.q_last = ndlp->nlp_ptx[ringno].q_last;
1926 			} else {
1927 				((IOCBQ *)abort.q_last)->next =
1928 				    (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
1929 			}
1930 
1931 			abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
1932 		}
1933 		/* Check if tx queue is not empty */
1934 		if (ndlp->nlp_tx[ringno].q_first) {
1935 			/* Transfer all iocb's to local queue */
1936 			if (abort.q_first == 0) {
1937 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
1938 				abort.q_last = ndlp->nlp_tx[ringno].q_last;
1939 			} else {
1940 				((IOCBQ *)abort.q_last)->next =
1941 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
1942 			}
1943 
1944 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
1945 
1946 		}
1947 		/* Clear the queue pointers */
1948 		ndlp->nlp_ptx[ringno].q_first = NULL;
1949 		ndlp->nlp_ptx[ringno].q_last = NULL;
1950 		ndlp->nlp_ptx[ringno].q_cnt = 0;
1951 
1952 		ndlp->nlp_tx[ringno].q_first = NULL;
1953 		ndlp->nlp_tx[ringno].q_last = NULL;
1954 		ndlp->nlp_tx[ringno].q_cnt = 0;
1955 
1956 		/* Remove node from service queue */
1957 
1958 		/* If this is the last node on list */
1959 		if (rp->nodeq.q_last == (void *)ndlp) {
1960 			rp->nodeq.q_last = NULL;
1961 			rp->nodeq.q_first = NULL;
1962 			rp->nodeq.q_cnt = 0;
1963 		} else {
1964 			/* Remove node from head */
1965 			rp->nodeq.q_first = ndlp->nlp_next[ringno];
1966 			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
1967 			    rp->nodeq.q_first;
1968 			rp->nodeq.q_cnt--;
1969 		}
1970 
1971 		/* Clear node */
1972 		ndlp->nlp_next[ringno] = NULL;
1973 	}
1974 
1975 	/* First cleanup the iocb's while still holding the lock */
1976 	iocbq = (IOCBQ *)abort.q_first;
1977 	while (iocbq) {
1978 		/* Free the IoTag and the bmp */
1979 		iocb = &iocbq->iocb;
1980 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
1981 
1982 		if (sbp && (sbp != STALE_PACKET)) {
1983 			mutex_enter(&sbp->mtx);
1984 
1985 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
1986 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
1987 				hba->ring_tx_count[ringno]--;
1988 			}
1989 			sbp->pkt_flags |= PACKET_IN_FLUSH;
1990 
1991 			/*
1992 			 * If the fpkt is already set, then we will leave it
1993 			 * alone
1994 			 */
1995 			/*
1996 			 * This ensures that this pkt is only accounted for
1997 			 * on one fpkt->flush_count
1998 			 */
1999 			if (!sbp->fpkt && fpkt) {
2000 				mutex_enter(&fpkt->mtx);
2001 				sbp->fpkt = fpkt;
2002 				fpkt->flush_count++;
2003 				mutex_exit(&fpkt->mtx);
2004 			}
2005 			mutex_exit(&sbp->mtx);
2006 		}
2007 		iocbq = (IOCBQ *)iocbq->next;
2008 
2009 	}	/* end of while */
2010 
2011 	mutex_exit(&EMLXS_RINGTX_LOCK);
2012 
2013 	/* Now abort the iocb's */
2014 	iocbq = (IOCBQ *)abort.q_first;
2015 	while (iocbq) {
2016 		/* Save the next iocbq for now */
2017 		next = (IOCBQ *)iocbq->next;
2018 
2019 		/* Unlink this iocbq */
2020 		iocbq->next = NULL;
2021 
2022 		/* Get the pkt */
2023 		sbp = (emlxs_buf_t *)iocbq->sbp;
2024 
2025 		if (sbp) {
2026 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2027 			    "tx: sbp=%p node=%p",
2028 			    sbp, sbp->node);
2029 
2030 			if (hba->state >= FC_LINK_UP) {
2031 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2032 				    IOERR_ABORT_REQUESTED, 1);
2033 			} else {
2034 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2035 				    IOERR_LINK_DOWN, 1);
2036 			}
2037 
2038 		}
2039 		/* Free the iocb and its associated buffers */
2040 		else {
2041 			icmd = &iocbq->iocb;
2042 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2043 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2044 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2045 				if ((hba->flag &
2046 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2047 					/* HBA is detaching or offlining */
2048 					if (icmd->ulpCommand !=
2049 					    CMD_QUE_RING_LIST64_CN) {
2050 						uint8_t *tmp;
2051 
2052 						for (i = 0;
2053 						    i < icmd->ulpBdeCount;
2054 						    i++) {
2055 
2056 							mp = EMLXS_GET_VADDR(
2057 							    hba, rp, icmd);
2058 
2059 							tmp = (uint8_t *)mp;
2060 							if (mp) {
2061 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2062 							}
2063 						}
2064 					}
2065 					(void) emlxs_mem_put(hba, MEM_IOCB,
2066 					    (uint8_t *)iocbq);
2067 				} else {
2068 					/* repost the unsolicited buffer */
2069 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2070 				}
2071 			}
2072 		}
2073 
2074 		iocbq = next;
2075 
2076 	}	/* end of while */
2077 
2078 	return (abort.q_cnt);
2079 
2080 } /* emlxs_tx_ring_flush() */
2081 
2082 
2083 /* Flush all IO's on all or a given ring for a given node */
2084 extern uint32_t
2085 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, RING *ring,
2086     uint32_t shutdown, emlxs_buf_t *fpkt)
2087 {
2088 	emlxs_hba_t *hba = HBA;
2089 	emlxs_buf_t *sbp;
2090 	uint32_t ringno;
2091 	RING *rp;
2092 	IOCB *icmd;
2093 	IOCBQ *iocbq;
2094 	NODELIST *prev;
2095 	IOCBQ *next;
2096 	IOCB *iocb;
2097 	Q abort;
2098 	uint32_t i;
2099 	MATCHMAP *mp;
2100 
2101 
2102 	bzero((void *)&abort, sizeof (Q));
2103 
2104 	/* Flush all I/O's on tx queue to this target */
2105 	mutex_enter(&EMLXS_RINGTX_LOCK);
2106 
2107 	if (!ndlp->nlp_base && shutdown) {
2108 		ndlp->nlp_active = 0;
2109 	}
2110 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2111 		rp = &hba->ring[ringno];
2112 
2113 		if (ring && rp != ring) {
2114 			continue;
2115 		}
2116 		if (!ndlp->nlp_base || shutdown) {
2117 			/* Check if priority queue is not empty */
2118 			if (ndlp->nlp_ptx[ringno].q_first) {
2119 				/* Transfer all iocb's to local queue */
2120 				if (abort.q_first == 0) {
2121 					abort.q_first =
2122 					    ndlp->nlp_ptx[ringno].q_first;
2123 					abort.q_last =
2124 					    ndlp->nlp_ptx[ringno].q_last;
2125 				} else {
2126 					emlxs_queue_t *q;
2127 
2128 					q = &ndlp->nlp_ptx[ringno];
2129 					((IOCBQ *)abort.q_last)->next =
2130 					    (IOCBQ *)q->q_first;
2131 					/*
2132 					 * ((IOCBQ *)abort.q_last)->next =
2133 					 * (IOCBQ *)
2134 					 * ndlp->nlp_ptx[ringno].q_first;
2135 					 */
2136 				}
2137 
2138 				abort.q_cnt += ndlp->nlp_ptx[ringno].q_cnt;
2139 			}
2140 		}
2141 		/* Check if tx queue is not empty */
2142 		if (ndlp->nlp_tx[ringno].q_first) {
2143 			/* Transfer all iocb's to local queue */
2144 			if (abort.q_first == 0) {
2145 				abort.q_first = ndlp->nlp_tx[ringno].q_first;
2146 				abort.q_last = ndlp->nlp_tx[ringno].q_last;
2147 			} else {
2148 				((IOCBQ *)abort.q_last)->next =
2149 				    (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2150 			}
2151 
2152 			abort.q_cnt += ndlp->nlp_tx[ringno].q_cnt;
2153 		}
2154 		/* Clear the queue pointers */
2155 		ndlp->nlp_ptx[ringno].q_first = NULL;
2156 		ndlp->nlp_ptx[ringno].q_last = NULL;
2157 		ndlp->nlp_ptx[ringno].q_cnt = 0;
2158 
2159 		ndlp->nlp_tx[ringno].q_first = NULL;
2160 		ndlp->nlp_tx[ringno].q_last = NULL;
2161 		ndlp->nlp_tx[ringno].q_cnt = 0;
2162 
2163 		/* If this node was on the ring queue, remove it */
2164 		if (ndlp->nlp_next[ringno]) {
2165 			/* If this is the only node on list */
2166 			if (rp->nodeq.q_first == (void *)ndlp &&
2167 			    rp->nodeq.q_last == (void *)ndlp) {
2168 				rp->nodeq.q_last = NULL;
2169 				rp->nodeq.q_first = NULL;
2170 				rp->nodeq.q_cnt = 0;
2171 			} else if (rp->nodeq.q_first == (void *)ndlp) {
2172 				NODELIST *nd;
2173 
2174 				rp->nodeq.q_first = ndlp->nlp_next[ringno];
2175 				nd = (NODELIST *)rp->nodeq.q_last;
2176 				nd->nlp_next[ringno] = rp->nodeq.q_first;
2177 				rp->nodeq.q_cnt--;
2178 			} else {	/* This is a little more difficult */
2179 				/*
2180 				 * Find the previous node in the circular
2181 				 * ring queue
2182 				 */
2183 				prev = ndlp;
2184 				while (prev->nlp_next[ringno] != ndlp) {
2185 					prev = prev->nlp_next[ringno];
2186 				}
2187 
2188 				prev->nlp_next[ringno] = ndlp->nlp_next[ringno];
2189 
2190 				if (rp->nodeq.q_last == (void *)ndlp) {
2191 					rp->nodeq.q_last = (void *)prev;
2192 				}
2193 				rp->nodeq.q_cnt--;
2194 
2195 			}
2196 
2197 			/* Clear node */
2198 			ndlp->nlp_next[ringno] = NULL;
2199 		}
2200 	}
2201 
2202 	/* First cleanup the iocb's while still holding the lock */
2203 	iocbq = (IOCBQ *)abort.q_first;
2204 	while (iocbq) {
2205 		/* Free the IoTag and the bmp */
2206 		iocb = &iocbq->iocb;
2207 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2208 
2209 		if (sbp && (sbp != STALE_PACKET)) {
2210 			mutex_enter(&sbp->mtx);
2211 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2212 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2213 				hba->ring_tx_count[ring->ringno]--;
2214 			}
2215 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2216 
2217 			/*
2218 			 * If the fpkt is already set, then we will leave it
2219 			 * alone
2220 			 */
2221 			/*
2222 			 * This ensures that this pkt is only accounted for
2223 			 * on one fpkt->flush_count
2224 			 */
2225 			if (!sbp->fpkt && fpkt) {
2226 				mutex_enter(&fpkt->mtx);
2227 				sbp->fpkt = fpkt;
2228 				fpkt->flush_count++;
2229 				mutex_exit(&fpkt->mtx);
2230 			}
2231 			mutex_exit(&sbp->mtx);
2232 		}
2233 		iocbq = (IOCBQ *)iocbq->next;
2234 
2235 	}	/* end of while */
2236 
2237 	mutex_exit(&EMLXS_RINGTX_LOCK);
2238 
2239 	/* Now abort the iocb's outside the locks */
2240 	iocbq = (IOCBQ *)abort.q_first;
2241 	while (iocbq) {
2242 		/* Save the next iocbq for now */
2243 		next = (IOCBQ *)iocbq->next;
2244 
2245 		/* Unlink this iocbq */
2246 		iocbq->next = NULL;
2247 
2248 		/* Get the pkt */
2249 		sbp = (emlxs_buf_t *)iocbq->sbp;
2250 
2251 		if (sbp) {
2252 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2253 			    "tx: sbp=%p node=%p",
2254 			    sbp, sbp->node);
2255 
2256 			if (hba->state >= FC_LINK_UP) {
2257 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2258 				    IOERR_ABORT_REQUESTED, 1);
2259 			} else {
2260 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2261 				    IOERR_LINK_DOWN, 1);
2262 			}
2263 
2264 		}
2265 		/* Free the iocb and its associated buffers */
2266 		else {
2267 			icmd = &iocbq->iocb;
2268 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2269 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2270 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2271 				if ((hba->flag &
2272 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2273 					/* HBA is detaching or offlining */
2274 					if (icmd->ulpCommand !=
2275 					    CMD_QUE_RING_LIST64_CN) {
2276 						uint8_t *tmp;
2277 
2278 						for (i = 0;
2279 						    i < icmd->ulpBdeCount;
2280 						    i++) {
2281 							mp = EMLXS_GET_VADDR(
2282 							    hba, rp, icmd);
2283 
2284 							tmp = (uint8_t *)mp;
2285 							if (mp) {
2286 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2287 							}
2288 						}
2289 					}
2290 					(void) emlxs_mem_put(hba, MEM_IOCB,
2291 					    (uint8_t *)iocbq);
2292 				} else {
2293 					/* repost the unsolicited buffer */
2294 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2295 				}
2296 			}
2297 		}
2298 
2299 		iocbq = next;
2300 
2301 	}	/* end of while */
2302 
2303 	return (abort.q_cnt);
2304 
2305 } /* emlxs_tx_node_flush() */
2306 
2307 
2308 /* Check for IO's on all or a given ring for a given node */
2309 extern uint32_t
2310 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, RING *ring)
2311 {
2312 	emlxs_hba_t *hba = HBA;
2313 	uint32_t ringno;
2314 	RING *rp;
2315 	uint32_t count;
2316 
2317 	count = 0;
2318 
2319 	/* Flush all I/O's on tx queue to this target */
2320 	mutex_enter(&EMLXS_RINGTX_LOCK);
2321 
2322 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
2323 		rp = &hba->ring[ringno];
2324 
2325 		if (ring && rp != ring) {
2326 			continue;
2327 		}
2328 		/* Check if priority queue is not empty */
2329 		if (ndlp->nlp_ptx[ringno].q_first) {
2330 			count += ndlp->nlp_ptx[ringno].q_cnt;
2331 		}
2332 		/* Check if tx queue is not empty */
2333 		if (ndlp->nlp_tx[ringno].q_first) {
2334 			count += ndlp->nlp_tx[ringno].q_cnt;
2335 		}
2336 	}
2337 
2338 	mutex_exit(&EMLXS_RINGTX_LOCK);
2339 
2340 	return (count);
2341 
2342 } /* emlxs_tx_node_check() */
2343 
2344 
2345 
2346 /* Flush all IO's on the FCP ring for a given node's lun */
2347 extern uint32_t
2348 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
2349     uint32_t lun, emlxs_buf_t *fpkt)
2350 {
2351 	emlxs_hba_t *hba = HBA;
2352 	emlxs_buf_t *sbp;
2353 	uint32_t ringno;
2354 	IOCBQ *iocbq;
2355 	IOCBQ *prev;
2356 	IOCBQ *next;
2357 	IOCB *iocb;
2358 	IOCB *icmd;
2359 	Q abort;
2360 	uint32_t i;
2361 	MATCHMAP *mp;
2362 	RING *rp;
2363 
2364 	ringno = FC_FCP_RING;
2365 	rp = &hba->ring[ringno];
2366 
2367 	bzero((void *)&abort, sizeof (Q));
2368 
2369 	/* Flush I/O's on txQ to this target's lun */
2370 	mutex_enter(&EMLXS_RINGTX_LOCK);
2371 
2372 	/* Scan the priority queue first */
2373 	prev = NULL;
2374 	iocbq = (IOCBQ *)ndlp->nlp_ptx[ringno].q_first;
2375 
2376 	while (iocbq) {
2377 		next = (IOCBQ *)iocbq->next;
2378 		iocb = &iocbq->iocb;
2379 		sbp = (emlxs_buf_t *)iocbq->sbp;
2380 
2381 		/* Check if this IO is for our lun */
2382 		if (sbp->lun == lun) {
2383 			/* Remove iocb from the node's tx queue */
2384 			if (next == 0) {
2385 				ndlp->nlp_ptx[ringno].q_last = (uint8_t *)prev;
2386 			}
2387 			if (prev == 0) {
2388 				ndlp->nlp_ptx[ringno].q_first = (uint8_t *)next;
2389 			} else {
2390 				prev->next = next;
2391 			}
2392 
2393 			iocbq->next = NULL;
2394 			ndlp->nlp_ptx[ringno].q_cnt--;
2395 
2396 			/* Add this iocb to our local abort Q */
2397 			/* This way we don't hold the RINGTX lock too long */
2398 			if (abort.q_first) {
2399 				((IOCBQ *) abort.q_last)->next = iocbq;
2400 				abort.q_last = (uint8_t *)iocbq;
2401 				abort.q_cnt++;
2402 			} else {
2403 				abort.q_first = (uint8_t *)iocbq;
2404 				abort.q_last = (uint8_t *)iocbq;
2405 				abort.q_cnt = 1;
2406 			}
2407 			iocbq->next = NULL;
2408 		} else {
2409 			prev = iocbq;
2410 		}
2411 
2412 		iocbq = next;
2413 
2414 	}	/* while (iocbq) */
2415 
2416 
2417 	/* Scan the regular queue */
2418 	prev = NULL;
2419 	iocbq = (IOCBQ *)ndlp->nlp_tx[ringno].q_first;
2420 
2421 	while (iocbq) {
2422 		next = (IOCBQ *)iocbq->next;
2423 		iocb = &iocbq->iocb;
2424 		sbp = (emlxs_buf_t *)iocbq->sbp;
2425 
2426 		/* Check if this IO is for our lun */
2427 		if (sbp->lun == lun) {
2428 			/* Remove iocb from the node's tx queue */
2429 			if (next == 0) {
2430 				ndlp->nlp_tx[ringno].q_last = (uint8_t *)prev;
2431 			}
2432 			if (prev == 0) {
2433 				ndlp->nlp_tx[ringno].q_first = (uint8_t *)next;
2434 			} else {
2435 				prev->next = next;
2436 			}
2437 
2438 			iocbq->next = NULL;
2439 			ndlp->nlp_tx[ringno].q_cnt--;
2440 
2441 			/* Add this iocb to our local abort Q */
2442 			/* This way we don't hold the RINGTX lock too long */
2443 			if (abort.q_first) {
2444 				((IOCBQ *) abort.q_last)->next = iocbq;
2445 				abort.q_last = (uint8_t *)iocbq;
2446 				abort.q_cnt++;
2447 			} else {
2448 				abort.q_first = (uint8_t *)iocbq;
2449 				abort.q_last = (uint8_t *)iocbq;
2450 				abort.q_cnt = 1;
2451 			}
2452 			iocbq->next = NULL;
2453 		} else {
2454 			prev = iocbq;
2455 		}
2456 
2457 		iocbq = next;
2458 
2459 	}	/* while (iocbq) */
2460 
2461 	/* First cleanup the iocb's while still holding the lock */
2462 	iocbq = (IOCBQ *)abort.q_first;
2463 	while (iocbq) {
2464 		/* Free the IoTag and the bmp */
2465 		iocb = &iocbq->iocb;
2466 		sbp = emlxs_unregister_pkt(iocbq->ring, iocb->ulpIoTag, 0);
2467 
2468 		if (sbp && (sbp != STALE_PACKET)) {
2469 			mutex_enter(&sbp->mtx);
2470 			if (sbp->pkt_flags & PACKET_IN_TXQ) {
2471 				sbp->pkt_flags &= ~PACKET_IN_TXQ;
2472 				hba->ring_tx_count[ringno]--;
2473 			}
2474 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2475 
2476 			/*
2477 			 * If the fpkt is already set, then we will leave it
2478 			 * alone
2479 			 */
2480 			/*
2481 			 * This ensures that this pkt is only accounted for
2482 			 * on one fpkt->flush_count
2483 			 */
2484 			if (!sbp->fpkt && fpkt) {
2485 				mutex_enter(&fpkt->mtx);
2486 				sbp->fpkt = fpkt;
2487 				fpkt->flush_count++;
2488 				mutex_exit(&fpkt->mtx);
2489 			}
2490 			mutex_exit(&sbp->mtx);
2491 		}
2492 		iocbq = (IOCBQ *)iocbq->next;
2493 
2494 	}	/* end of while */
2495 
2496 	mutex_exit(&EMLXS_RINGTX_LOCK);
2497 
2498 	/* Now abort the iocb's outside the locks */
2499 	iocbq = (IOCBQ *)abort.q_first;
2500 	while (iocbq) {
2501 		/* Save the next iocbq for now */
2502 		next = (IOCBQ *)iocbq->next;
2503 
2504 		/* Unlink this iocbq */
2505 		iocbq->next = NULL;
2506 
2507 		/* Get the pkt */
2508 		sbp = (emlxs_buf_t *)iocbq->sbp;
2509 
2510 		if (sbp) {
2511 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2512 			    "tx: sbp=%p node=%p",
2513 			    sbp, sbp->node);
2514 
2515 			if (hba->state >= FC_LINK_UP) {
2516 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2517 				    IOERR_ABORT_REQUESTED, 1);
2518 			} else {
2519 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2520 				    IOERR_LINK_DOWN, 1);
2521 			}
2522 		}
2523 		/* Free the iocb and its associated buffers */
2524 		else {
2525 			icmd = &iocbq->iocb;
2526 
2527 			if (icmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
2528 			    icmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
2529 			    icmd->ulpCommand == CMD_QUE_RING_LIST64_CN) {
2530 				if ((hba->flag &
2531 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2532 					/* HBA is detaching or offlining */
2533 					if (icmd->ulpCommand !=
2534 					    CMD_QUE_RING_LIST64_CN) {
2535 						uint8_t *tmp;
2536 
2537 						for (i = 0;
2538 						    i < icmd->ulpBdeCount;
2539 						    i++) {
2540 							mp = EMLXS_GET_VADDR(
2541 							    hba, rp, icmd);
2542 
2543 							tmp = (uint8_t *)mp;
2544 							if (mp) {
2545 	(void) emlxs_mem_put(hba, MEM_BUF, tmp);
2546 							}
2547 						}
2548 					}
2549 					(void) emlxs_mem_put(hba, MEM_IOCB,
2550 					    (uint8_t *)iocbq);
2551 				} else {
2552 					/* repost the unsolicited buffer */
2553 					emlxs_issue_iocb_cmd(hba, rp, iocbq);
2554 				}
2555 			}
2556 		}
2557 
2558 		iocbq = next;
2559 
2560 	}	/* end of while */
2561 
2562 
2563 	return (abort.q_cnt);
2564 
2565 } /* emlxs_tx_lun_flush() */
2566 
2567 
/*
 * Queue an iocb on its node's tx (or priority tx) queue for the ring
 * and, if needed, link the node onto the ring's node service queue.
 *
 * If 'lock' is non-zero the EMLXS_RINGTX_LOCK is acquired/released
 * here; otherwise the caller must already hold it.  If the node is
 * inactive or the pkt is being aborted, the pkt is completed (or the
 * bare iocb freed) instead of being queued.
 */
extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t ringno;
	NODELIST *nlp;
	RING *rp;
	emlxs_buf_t *sbp;

	port = (emlxs_port_t *)iocbq->port;
	hba = HBA;
	rp = (RING *)iocbq->ring;
	nlp = (NODELIST *)iocbq->node;
	ringno = rp->ringno;
	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (nlp == NULL) {
		/* Set node to base node by default */
		nlp = &port->node_base;

		iocbq->node = (void *)nlp;

		if (sbp) {
			sbp->node = (void *)nlp;
		}
	}
	if (lock) {
		mutex_enter(&EMLXS_RINGTX_LOCK);
	}
	/* Node gone or pkt aborting: fail the request instead of queuing */
	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
		if (sbp) {
			mutex_enter(&sbp->mtx);

			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			mutex_exit(&sbp->mtx);

			/* Free the ulpIoTag and the bmp */
			(void) emlxs_unregister_pkt(rp, sbp->iotag, 0);

			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}
			/* Complete outside the ringtx lock */
			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

			return;
		} else {
			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}
			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
		}

		return;
	}
	if (sbp) {

		mutex_enter(&sbp->mtx);

		/* Already completing, on chip, or queued: do not queue twice */
		if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ |
		    PACKET_IN_TXQ)) {
			mutex_exit(&sbp->mtx);
			if (lock) {
				mutex_exit(&EMLXS_RINGTX_LOCK);
			}
			return;
		}
		sbp->pkt_flags |= PACKET_IN_TXQ;
		hba->ring_tx_count[ringno]++;

		mutex_exit(&sbp->mtx);
	}
	/* Check iocbq priority */
	if (iocbq->flag & IOCB_PRIORITY) {
		/* Add the iocb to the bottom of the node's ptx queue */
		if (nlp->nlp_ptx[ringno].q_first) {
			((IOCBQ *)nlp->nlp_ptx[ringno].q_last)->next = iocbq;
			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_cnt++;
		} else {
			nlp->nlp_ptx[ringno].q_first = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[ringno].q_cnt = 1;
		}

		iocbq->next = NULL;
	} else {	/* Normal priority */

		/* Add the iocb to the bottom of the node's tx queue */
		if (nlp->nlp_tx[ringno].q_first) {
			((IOCBQ *)nlp->nlp_tx[ringno].q_last)->next = iocbq;
			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_cnt++;
		} else {
			nlp->nlp_tx[ringno].q_first = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[ringno].q_cnt = 1;
		}

		iocbq->next = NULL;
	}


	/*
	 * Check if the node is not already on ring queue and (is not closed
	 * or  is a priority request)
	 */
	if (!nlp->nlp_next[ringno] && (!(nlp->nlp_flag[ringno] & NLP_CLOSED) ||
	    (iocbq->flag & IOCB_PRIORITY))) {
		/* If so, then add it to the ring queue */
		/* The node queue is a circular singly-linked list */
		if (rp->nodeq.q_first) {
			((NODELIST *)rp->nodeq.q_last)->nlp_next[ringno] =
			    (uint8_t *)nlp;
			nlp->nlp_next[ringno] = rp->nodeq.q_first;

			/*
			 * If this is not the base node then add it to the
			 * tail
			 */
			if (!nlp->nlp_base) {
				rp->nodeq.q_last = (uint8_t *)nlp;
			} else {	/* Otherwise, add it to the head */
				/* The command node always gets priority */
				rp->nodeq.q_first = (uint8_t *)nlp;
			}

			rp->nodeq.q_cnt++;
		} else {
			rp->nodeq.q_first = (uint8_t *)nlp;
			rp->nodeq.q_last = (uint8_t *)nlp;
			nlp->nlp_next[ringno] = nlp;
			rp->nodeq.q_cnt = 1;
		}
	}
	HBASTATS.IocbTxPut[ringno]++;

	/* Adjust the ring timeout timer */
	rp->timeout = hba->timer_tics + 5;

	if (lock) {
		mutex_exit(&EMLXS_RINGTX_LOCK);
	}
	return;

} /* emlxs_tx_put() */
2724 
2725 
/*
 * Dequeue the next iocb to transmit on a ring.
 *
 * Picks the node at the head of the ring's node service queue, takes
 * the next iocb from that node's priority queue (or from the normal
 * queue if the node is not closed), then round-robins or removes the
 * node on the ring queue.  If 'lock' is non-zero the EMLXS_RINGTX_LOCK
 * is acquired/released here; otherwise the caller must already hold
 * it.  Returns NULL when nothing is queued.
 */
extern IOCBQ *
emlxs_tx_get(RING *rp, uint32_t lock)
{
	emlxs_hba_t *hba;
	uint32_t ringno;
	IOCBQ *iocbq;
	NODELIST *nlp;
	emlxs_buf_t *sbp;

	hba = rp->hba;
	ringno = rp->ringno;

	if (lock) {
		mutex_enter(&EMLXS_RINGTX_LOCK);
	}
begin:

	iocbq = NULL;

	/* Check if a node needs servicing */
	if (rp->nodeq.q_first) {
		nlp = (NODELIST *)rp->nodeq.q_first;

		/* Get next iocb from node's priority queue */

		if (nlp->nlp_ptx[ringno].q_first) {
			iocbq = (IOCBQ *)nlp->nlp_ptx[ringno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_ptx[ringno].q_last == (void *)iocbq) {
				nlp->nlp_ptx[ringno].q_first = NULL;
				nlp->nlp_ptx[ringno].q_last = NULL;
				nlp->nlp_ptx[ringno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_ptx[ringno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_ptx[ringno].q_cnt--;
			}

			iocbq->next = NULL;
		}
		/* Get next iocb from node tx queue if node not closed */
		else if (nlp->nlp_tx[ringno].q_first &&
		    !(nlp->nlp_flag[ringno] & NLP_CLOSED)) {
			iocbq = (IOCBQ *)nlp->nlp_tx[ringno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_tx[ringno].q_last == (void *)iocbq) {
				nlp->nlp_tx[ringno].q_first = NULL;
				nlp->nlp_tx[ringno].q_last = NULL;
				nlp->nlp_tx[ringno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_tx[ringno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_tx[ringno].q_cnt--;
			}

			iocbq->next = NULL;
		}
		/* Now deal with node itself */

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[ringno].q_first) ||
		    (nlp->nlp_tx[ringno].q_first &&
		    !(nlp->nlp_flag[ringno] & NLP_CLOSED))) {

			/*
			 * If this is the base node, then don't shift the
			 * pointers
			 */
			/* We want to drain the base node before moving on */
			if (!nlp->nlp_base) {
				/*
				 * Just shift ring queue pointers to next
				 * node
				 */
				rp->nodeq.q_last = (void *)nlp;
				rp->nodeq.q_first = nlp->nlp_next[ringno];
			}
		} else {
			/* Remove node from ring queue */

			/* If this is the last node on list */
			if (rp->nodeq.q_last == (void *)nlp) {
				rp->nodeq.q_last = NULL;
				rp->nodeq.q_first = NULL;
				rp->nodeq.q_cnt = 0;
			} else {
				NODELIST *nd;

				/* Remove node from head */
				rp->nodeq.q_first = nlp->nlp_next[ringno];
				nd = (NODELIST *)rp->nodeq.q_last;
				nd->nlp_next[ringno] = rp->nodeq.q_first;
				rp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[ringno] = NULL;
		}

		/*
		 * If no iocbq was found on this node, then it will have been
		 * removed. So try again.
		 */
		if (!iocbq) {
			goto begin;
		}
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Check flags before we enter mutex in case this has
			 * been flushed and destroyed
			 */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				goto begin;
			}
			mutex_enter(&sbp->mtx);

			/* Re-check under the pkt mutex; skip if now stale */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				mutex_exit(&sbp->mtx);
				goto begin;
			}
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->ring_tx_count[ringno]--;

			mutex_exit(&sbp->mtx);
		}
	}
	if (iocbq) {
		HBASTATS.IocbTxGet[ringno]++;
	}
	/* Adjust the ring timeout timer */
	rp->timeout = (rp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;

	if (lock) {
		mutex_exit(&EMLXS_RINGTX_LOCK);
	}
	return (iocbq);

} /* emlxs_tx_get() */
2875 
2876 
2877 
/*
 * Close all chip-queued IO's on all (or a given) ring for a given node.
 *
 * For each matching active pkt a CLOSE_XRI_CN iocb is created via
 * emlxs_sbp_abort_add() and collected on a local queue while the FCTAB
 * lock is held; the close iocbs are then transmitted and each affected
 * ring is kicked afterwards.  'fpkt', if set, accumulates the flush
 * count of the affected pkts.  Returns the number of close iocbs
 * generated.
 */
extern uint32_t
emlxs_chipq_node_flush(emlxs_port_t *port, RING *ring,
    NODELIST *ndlp, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	Q abort;
	RING *rp;
	uint32_t ringno;
	uint8_t flag[MAX_RINGS];
	uint32_t iotag;

	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));

	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		/* If a specific ring was requested, skip all others */
		if (ring && rp != ring) {
			continue;
		}
		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));

		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];

			/* Only active pkts for this node not yet closed */
			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (sbp->node == ndlp) &&
			    (sbp->ring == rp) &&
			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
				emlxs_sbp_abort_add(port, sbp, &abort,
				    flag, fpkt);
			}
		}
		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

	}	/* for */

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger ring service on every ring that got a close iocb */
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		if (!flag[ringno]) {
			continue;
		}
		rp = &hba->ring[ringno];

		emlxs_issue_iocb_cmd(hba, rp, 0);
	}

	return (abort.q_cnt);

} /* emlxs_chipq_node_flush() */
2947 
2948 
/*
 * Flush all IO's left on all iotag lists.
 *
 * Every remaining pkt in each ring's fc_table is forcibly unregistered,
 * stamped with IOSTAT_LOCAL_REJECT/IOERR_LINK_DOWN, and handed to the
 * ring's interrupt thread for deferred completion.  Called when the
 * HBA is going away; all nodes are assumed destroyed at this point.
 * Returns the total number of pkts flushed.
 */
static uint32_t
emlxs_iotag_flush(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCB *iocb;
	Q abort;
	RING *rp;
	uint32_t ringno;
	uint32_t iotag;
	uint32_t count;

	count = 0;
	for (ringno = 0; ringno < hba->ring_count; ringno++) {
		rp = &hba->ring[ringno];

		bzero((void *)&abort, sizeof (Q));

		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));

		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
			sbp = rp->fc_table[iotag];

			if (!sbp || (sbp == STALE_PACKET)) {
				continue;
			}
			/* Unregister the packet */
			rp->fc_table[iotag] = STALE_PACKET;
			hba->io_count[ringno]--;
			sbp->iotag = 0;

			/* Clean up the sbp */
			mutex_enter(&sbp->mtx);

			/* Set IOCB status */
			iocbq = &sbp->iocbq;
			iocb = &iocbq->iocb;

			iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
			iocb->ulpLe = 1;
			iocbq->next = NULL;

			/* Keep the tx accounting consistent */
			if (sbp->pkt_flags & PACKET_IN_TXQ) {
				sbp->pkt_flags &= ~PACKET_IN_TXQ;
				hba->ring_tx_count[ringno]--;
			}
			if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
				sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
			}
			if (sbp->bmp) {
				(void) emlxs_mem_put(hba, MEM_BPL,
				    (uint8_t *)sbp->bmp);
				sbp->bmp = 0;
			}
			/* At this point all nodes are assumed destroyed */
			sbp->node = 0;

			mutex_exit(&sbp->mtx);

			/* Add this iocb to our local abort Q */
			if (abort.q_first) {
				((IOCBQ *) abort.q_last)->next = iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt++;
			} else {
				abort.q_first = (uint8_t *)iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt = 1;
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));

		/* Trigger deferred completion */
		if (abort.q_first) {
			/* Append the whole batch to the ring's rsp list */
			mutex_enter(&rp->rsp_lock);
			if (rp->rsp_head == NULL) {
				rp->rsp_head = (IOCBQ *)abort.q_first;
				rp->rsp_tail = (IOCBQ *)abort.q_last;
			} else {
				rp->rsp_tail->next = (IOCBQ *)abort.q_first;
				rp->rsp_tail = (IOCBQ *)abort.q_last;
			}
			mutex_exit(&rp->rsp_lock);

			/* Wake the interrupt thread to run completions */
			emlxs_thread_trigger2(&rp->intr_thread,
			    emlxs_proc_ring, rp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "Forced iotag completion. ring=%d count=%d",
			    ringno, abort.q_cnt);

			count += abort.q_cnt;
		}
	}

	return (count);

} /* emlxs_iotag_flush() */
3051 
3052 
3053 
3054 /* Checks for IO's on all or a given ring for a given node */
3055 extern uint32_t
3056 emlxs_chipq_node_check(emlxs_port_t *port, RING *ring, NODELIST *ndlp)
3057 {
3058 	emlxs_hba_t *hba = HBA;
3059 	emlxs_buf_t *sbp;
3060 	RING *rp;
3061 	uint32_t ringno;
3062 	uint32_t count;
3063 	uint32_t iotag;
3064 
3065 	count = 0;
3066 
3067 	for (ringno = 0; ringno < hba->ring_count; ringno++) {
3068 		rp = &hba->ring[ringno];
3069 
3070 		if (ring && rp != ring) {
3071 			continue;
3072 		}
3073 		mutex_enter(&EMLXS_FCTAB_LOCK(ringno));
3074 
3075 		for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3076 			sbp = rp->fc_table[iotag];
3077 
3078 			if (sbp && (sbp != STALE_PACKET) &&
3079 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3080 			    (sbp->node == ndlp) &&
3081 			    (sbp->ring == rp) &&
3082 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3083 				count++;
3084 			}
3085 		}
3086 		mutex_exit(&EMLXS_FCTAB_LOCK(ringno));
3087 
3088 	}	/* for */
3089 
3090 	return (count);
3091 
3092 } /* emlxs_chipq_node_check() */
3093 
3094 
3095 
3096 /* Flush all IO's for a given node's lun (FC_FCP_RING only) */
3097 extern uint32_t
3098 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3099     uint32_t lun, emlxs_buf_t *fpkt)
3100 {
3101 	emlxs_hba_t *hba = HBA;
3102 	emlxs_buf_t *sbp;
3103 	RING *rp;
3104 	IOCBQ *iocbq;
3105 	IOCBQ *next;
3106 	Q abort;
3107 	uint32_t iotag;
3108 	uint8_t flag[MAX_RINGS];
3109 
3110 	bzero((void *)flag, sizeof (flag));
3111 	bzero((void *)&abort, sizeof (Q));
3112 	rp = &hba->ring[FC_FCP_RING];
3113 
3114 	mutex_enter(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3115 	for (iotag = 1; iotag < rp->max_iotag; iotag++) {
3116 		sbp = rp->fc_table[iotag];
3117 
3118 		if (sbp && (sbp != STALE_PACKET) &&
3119 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3120 		    sbp->node == ndlp &&
3121 		    sbp->ring == rp &&
3122 		    sbp->lun == lun &&
3123 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3124 			emlxs_sbp_abort_add(port, sbp, &abort, flag, fpkt);
3125 		}
3126 	}
3127 	mutex_exit(&EMLXS_FCTAB_LOCK(FC_FCP_RING));
3128 
3129 	/* Now put the iocb's on the tx queue */
3130 	iocbq = (IOCBQ *)abort.q_first;
3131 	while (iocbq) {
3132 		/* Save the next iocbq for now */
3133 		next = (IOCBQ *)iocbq->next;
3134 
3135 		/* Unlink this iocbq */
3136 		iocbq->next = NULL;
3137 
3138 		/* Send this iocbq */
3139 		emlxs_tx_put(iocbq, 1);
3140 
3141 		iocbq = next;
3142 	}
3143 
3144 	/* Now trigger ring service */
3145 	if (abort.q_cnt) {
3146 		emlxs_issue_iocb_cmd(hba, rp, 0);
3147 	}
3148 	return (abort.q_cnt);
3149 
3150 } /* emlxs_chipq_lun_flush() */
3151 
3152 
3153 
3154 /*
3155  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3156  * This must be called while holding the EMLXS_FCCTAB_LOCK
3157  */
3158 extern IOCBQ *
3159 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp, uint16_t iotag,
3160     RING *rp, uint8_t class, int32_t flag)
3161 {
3162 	emlxs_hba_t *hba = HBA;
3163 	IOCBQ *iocbq;
3164 	IOCB *iocb;
3165 	uint16_t abort_iotag;
3166 
3167 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3168 		return (NULL);
3169 	}
3170 	iocbq->ring = (void *)rp;
3171 	iocbq->port = (void *)port;
3172 	iocbq->node = (void *)ndlp;
3173 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3174 	iocb = &iocbq->iocb;
3175 
3176 	/*
3177 	 * set up an iotag using special Abort iotags
3178 	 */
3179 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3180 		rp->fc_abort_iotag = rp->max_iotag;
3181 	}
3182 	abort_iotag = rp->fc_abort_iotag++;
3183 
3184 
3185 	iocb->ulpIoTag = abort_iotag;
3186 	iocb->un.acxri.abortType = flag;
3187 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3188 	iocb->un.acxri.abortIoTag = iotag;
3189 	iocb->ulpLe = 1;
3190 	iocb->ulpClass = class;
3191 	iocb->ulpCommand = CMD_ABORT_XRI_CN;
3192 	iocb->ulpOwner = OWN_CHIP;
3193 
3194 	return (iocbq);
3195 
3196 } /* emlxs_create_abort_xri_cn() */
3197 
3198 
3199 extern IOCBQ *
3200 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3201     RING *rp, uint8_t class, int32_t flag)
3202 {
3203 	emlxs_hba_t *hba = HBA;
3204 	IOCBQ *iocbq;
3205 	IOCB *iocb;
3206 	uint16_t abort_iotag;
3207 
3208 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3209 		return (NULL);
3210 	}
3211 	iocbq->ring = (void *)rp;
3212 	iocbq->port = (void *)port;
3213 	iocbq->node = (void *)ndlp;
3214 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3215 	iocb = &iocbq->iocb;
3216 
3217 	/*
3218 	 * set up an iotag using special Abort iotags
3219 	 */
3220 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3221 		rp->fc_abort_iotag = rp->max_iotag;
3222 	}
3223 	abort_iotag = rp->fc_abort_iotag++;
3224 
3225 	iocb->ulpContext = xid;
3226 	iocb->ulpIoTag = abort_iotag;
3227 	iocb->un.acxri.abortType = flag;
3228 	iocb->ulpLe = 1;
3229 	iocb->ulpClass = class;
3230 	iocb->ulpCommand = CMD_ABORT_XRI_CX;
3231 	iocb->ulpOwner = OWN_CHIP;
3232 
3233 	return (iocbq);
3234 
3235 } /* emlxs_create_abort_xri_cx() */
3236 
3237 
3238 
3239 /* This must be called while holding the EMLXS_FCCTAB_LOCK */
3240 extern IOCBQ *
3241 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3242     uint16_t iotag, RING *rp)
3243 {
3244 	emlxs_hba_t *hba = HBA;
3245 	IOCBQ *iocbq;
3246 	IOCB *iocb;
3247 	uint16_t abort_iotag;
3248 
3249 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3250 		return (NULL);
3251 	}
3252 	iocbq->ring = (void *)rp;
3253 	iocbq->port = (void *)port;
3254 	iocbq->node = (void *)ndlp;
3255 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3256 	iocb = &iocbq->iocb;
3257 
3258 	/*
3259 	 * set up an iotag using special Abort iotags
3260 	 */
3261 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3262 		rp->fc_abort_iotag = rp->max_iotag;
3263 	}
3264 	abort_iotag = rp->fc_abort_iotag++;
3265 
3266 	iocb->ulpIoTag = abort_iotag;
3267 	iocb->un.acxri.abortType = 0;
3268 	iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3269 	iocb->un.acxri.abortIoTag = iotag;
3270 	iocb->ulpLe = 1;
3271 	iocb->ulpClass = 0;
3272 	iocb->ulpCommand = CMD_CLOSE_XRI_CN;
3273 	iocb->ulpOwner = OWN_CHIP;
3274 
3275 	return (iocbq);
3276 
3277 } /* emlxs_create_close_xri_cn() */
3278 
3279 
3280 /* This must be called while holding the EMLXS_FCCTAB_LOCK */
3281 extern IOCBQ *
3282 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp,
3283     uint16_t xid, RING *rp)
3284 {
3285 	emlxs_hba_t *hba = HBA;
3286 	IOCBQ *iocbq;
3287 	IOCB *iocb;
3288 	uint16_t abort_iotag;
3289 
3290 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
3291 		return (NULL);
3292 	}
3293 	iocbq->ring = (void *)rp;
3294 	iocbq->port = (void *)port;
3295 	iocbq->node = (void *)ndlp;
3296 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3297 	iocb = &iocbq->iocb;
3298 
3299 	/*
3300 	 * set up an iotag using special Abort iotags
3301 	 */
3302 	if ((rp->fc_abort_iotag < rp->max_iotag)) {
3303 		rp->fc_abort_iotag = rp->max_iotag;
3304 	}
3305 	abort_iotag = rp->fc_abort_iotag++;
3306 
3307 	iocb->ulpContext = xid;
3308 	iocb->ulpIoTag = abort_iotag;
3309 	iocb->ulpLe = 1;
3310 	iocb->ulpClass = 0;
3311 	iocb->ulpCommand = CMD_CLOSE_XRI_CX;
3312 	iocb->ulpOwner = OWN_CHIP;
3313 
3314 	return (iocbq);
3315 
3316 } /* emlxs_create_close_xri_cx() */
3317 
3318 
3319 
/*
 * Build a CLOSE_XRI_CN iocb for 'sbp', append it to the caller's local
 * abort queue, and mark the pkt as flushed/closed with a 10-tick abort
 * timer.  'flag[ringno]' is set so the caller knows which rings need
 * servicing.  If 'fpkt' is given, the pkt is accounted exactly once on
 * fpkt->flush_count.  This must be called while holding the
 * EMLXS_FCCTAB_LOCK.
 */
static void
emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
    uint8_t *flag, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	RING *rp;
	NODELIST *ndlp;

	rp = (RING *)sbp->ring;
	ndlp = sbp->node;

	/* Create the close XRI IOCB */
	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, rp);

	/* Add this iocb to our local abort Q */
	/* This way we don't hold the CHIPQ lock too long */
	if (iocbq) {
		if (abort->q_first) {
			((IOCBQ *) abort->q_last)->next = iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt++;
		} else {
			abort->q_first = (uint8_t *)iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt = 1;
		}
		iocbq->next = NULL;
	}
	/* set the flags */
	mutex_enter(&sbp->mtx);

	/* Mark the pkt closed and give the close 10 timer ticks */
	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
	sbp->ticks = hba->timer_tics + 10;
	sbp->abort_attempts++;

	/* Tell the caller this ring needs to be serviced */
	flag[rp->ringno] = 1;

	/* If the fpkt is already set, then we will leave it alone */
	/*
	 * This ensures that this pkt is only accounted for on one
	 * fpkt->flush_count
	 */
	if (!sbp->fpkt && fpkt) {
		mutex_enter(&fpkt->mtx);
		sbp->fpkt = fpkt;
		fpkt->flush_count++;
		mutex_exit(&fpkt->mtx);
	}
	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_sbp_abort_add() */
3375