1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_FCP_C);
32 
33 #define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
34 	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
35 
36 static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
37     Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
38 
39 #define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
40 #define	SCSI_INQUIRY			0x12
41 #define	SCSI_RX_DIAG    		0x1C
42 
43 
44 /*
45  *  emlxs_handle_fcp_event
46  *
47  *  Description: Process an FCP Rsp Ring completion
48  *
49  */
50 /* ARGSUSED */
51 extern void
52 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
53 {
54 	emlxs_port_t *port = &PPORT;
55 	emlxs_config_t	*cfg = &CFG;
56 	IOCB *cmd;
57 	emlxs_buf_t *sbp;
58 	fc_packet_t *pkt = NULL;
59 #ifdef SAN_DIAG_SUPPORT
60 	NODELIST *ndlp;
61 #endif
62 	uint32_t iostat;
63 	uint8_t localstat;
64 	fcp_rsp_t *rsp;
65 	uint32_t rsp_data_resid;
66 	uint32_t check_underrun;
67 	uint8_t asc;
68 	uint8_t ascq;
69 	uint8_t scsi_status;
70 	uint8_t sense;
71 	uint32_t did;
72 	uint32_t fix_it;
73 	uint8_t *scsi_cmd;
74 	uint8_t scsi_opcode;
75 	uint16_t scsi_dl;
76 	uint32_t data_rx;
77 
78 	cmd = &iocbq->iocb;
79 
80 	/* Initialize the status */
81 	iostat = cmd->ULPSTATUS;
82 	localstat = 0;
83 	scsi_status = 0;
84 	asc = 0;
85 	ascq = 0;
86 	sense = 0;
87 	check_underrun = 0;
88 	fix_it = 0;
89 
90 	HBASTATS.FcpEvent++;
91 
92 	sbp = (emlxs_buf_t *)iocbq->sbp;
93 
94 	if (!sbp) {
95 		/* completion with missing xmit command */
96 		HBASTATS.FcpStray++;
97 
98 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 		    "cmd=%x iotag=%x", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100 
101 		return;
102 	}
103 
104 	HBASTATS.FcpCompleted++;
105 
106 #ifdef SAN_DIAG_SUPPORT
107 	emlxs_update_sd_bucket(sbp);
108 #endif /* SAN_DIAG_SUPPORT */
109 
110 	pkt = PRIV2PKT(sbp);
111 
112 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
114 	scsi_opcode = scsi_cmd[12];
115 	data_rx = 0;
116 
117 	/* Sync data in data buffer only on FC_PKT_FCP_READ */
118 	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 		    DDI_DMA_SYNC_FORKERNEL);
121 
122 #ifdef TEST_SUPPORT
123 		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 		    (pkt->pkt_datalen >= 512)) {
125 			hba->underrun_counter--;
126 			iostat = IOSTAT_FCP_RSP_ERROR;
127 
128 			/* Report 512 bytes missing by adapter */
129 			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130 
131 			/* Corrupt 512 bytes of Data buffer */
132 			bzero((uint8_t *)pkt->pkt_data, 512);
133 
134 			/* Set FCP response to STATUS_GOOD */
135 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 		}
137 #endif /* TEST_SUPPORT */
138 	}
139 
140 	/* Process the pkt */
141 	mutex_enter(&sbp->mtx);
142 
143 	/* Check for immediate return */
144 	if ((iostat == IOSTAT_SUCCESS) &&
145 	    (pkt->pkt_comp) &&
146 	    !(sbp->pkt_flags &
147 	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 	    PACKET_IN_ABORT | PACKET_POLLED))) {
151 		HBASTATS.FcpGood++;
152 
153 		sbp->pkt_flags |=
154 		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 		    PACKET_COMPLETED | PACKET_ULP_OWNED);
156 		mutex_exit(&sbp->mtx);
157 
158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 		emlxs_unswap_pkt(sbp);
160 #endif /* EMLXS_MODREV2X */
161 
162 #ifdef FMA_SUPPORT
163 		emlxs_check_dma(hba, sbp);
164 #endif  /* FMA_SUPPORT */
165 
166 		cp->ulpCmplCmd++;
167 		(*pkt->pkt_comp) (pkt);
168 
169 #ifdef FMA_SUPPORT
170 		if (hba->flag & FC_DMA_CHECK_ERROR) {
171 			emlxs_thread_spawn(hba, emlxs_restart_thread,
172 			    NULL, NULL);
173 		}
174 #endif  /* FMA_SUPPORT */
175 
176 		return;
177 	}
178 
179 	/*
180 	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
181 	 * is reported.
182 	 */
183 
184 	/* Check if a response buffer was provided */
185 	if ((iostat == IOSTAT_FCP_RSP_ERROR) && pkt->pkt_rsplen) {
186 		EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
187 		    DDI_DMA_SYNC_FORKERNEL);
188 
189 		/* Get the response buffer pointer */
190 		rsp = (fcp_rsp_t *)pkt->pkt_resp;
191 
192 		/* Set the valid response flag */
193 		sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
194 
195 		scsi_status = rsp->fcp_u.fcp_status.scsi_status;
196 
197 #ifdef SAN_DIAG_SUPPORT
198 		ndlp = (NODELIST *)iocbq->node;
199 		if (scsi_status == SCSI_STAT_QUE_FULL) {
200 			emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
201 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
202 		} else if (scsi_status == SCSI_STAT_BUSY) {
203 			emlxs_log_sd_scsi_event(port,
204 			    SD_SCSI_SUBCATEGORY_DEVBSY,
205 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
206 		}
207 #endif
208 
209 		/*
210 		 * Convert a task abort to a check condition with no data
211 		 * transferred. We saw a data corruption when Solaris received
212 		 * a Task Abort from a tape.
213 		 */
214 		if (scsi_status == SCSI_STAT_TASK_ABORT) {
215 			EMLXS_MSGF(EMLXS_CONTEXT,
216 			    &emlxs_fcp_completion_error_msg,
217 			    "Task Abort. "
218 			    "Fixed.did=0x%06x sbp=%p cmd=%02x dl=%d",
219 			    did, sbp, scsi_opcode, pkt->pkt_datalen);
220 
221 			rsp->fcp_u.fcp_status.scsi_status =
222 			    SCSI_STAT_CHECK_COND;
223 			rsp->fcp_u.fcp_status.rsp_len_set = 0;
224 			rsp->fcp_u.fcp_status.sense_len_set = 0;
225 			rsp->fcp_u.fcp_status.resid_over = 0;
226 
227 			if (pkt->pkt_datalen) {
228 				rsp->fcp_u.fcp_status.resid_under = 1;
229 				rsp->fcp_resid =
230 				    LE_SWAP32(pkt->pkt_datalen);
231 			} else {
232 				rsp->fcp_u.fcp_status.resid_under = 0;
233 				rsp->fcp_resid = 0;
234 			}
235 
236 			scsi_status = SCSI_STAT_CHECK_COND;
237 		}
238 
239 		/*
240 		 * We only need to check underrun if data could
241 		 * have been sent
242 		 */
243 
244 		/* Always check underrun if status is good */
245 		if (scsi_status == SCSI_STAT_GOOD) {
246 			check_underrun = 1;
247 		}
248 		/* Check the sense codes if this is a check condition */
249 		else if (scsi_status == SCSI_STAT_CHECK_COND) {
250 			check_underrun = 1;
251 
252 			/* Check if sense data was provided */
253 			if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
254 				sense = *((uint8_t *)rsp + 32 + 2);
255 				asc = *((uint8_t *)rsp + 32 + 12);
256 				ascq = *((uint8_t *)rsp + 32 + 13);
257 			}
258 
259 #ifdef SAN_DIAG_SUPPORT
260 			emlxs_log_sd_scsi_check_event(port,
261 			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
262 			    scsi_opcode, sense, asc, ascq);
263 #endif
264 		}
265 		/* Status is not good and this is not a check condition */
266 		/* No data should have been sent */
267 		else {
268 			check_underrun = 0;
269 		}
270 
271 		/* Get the residual underrun count reported by the SCSI reply */
272 		rsp_data_resid = (pkt->pkt_datalen &&
273 		    rsp->fcp_u.fcp_status.resid_under) ? LE_SWAP32(rsp->
274 		    fcp_resid) : 0;
275 
276 		/* Set the pkt resp_resid field */
277 		pkt->pkt_resp_resid = 0;
278 
279 		/* Set the pkt data_resid field */
280 		if (pkt->pkt_datalen &&
281 		    (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
282 			/*
283 			 * Get the residual underrun count reported by
284 			 * our adapter
285 			 */
286 			pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
287 
288 #ifdef SAN_DIAG_SUPPORT
289 			if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
290 				emlxs_log_sd_fc_rdchk_event(port,
291 				    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
292 				    scsi_opcode, pkt->pkt_data_resid);
293 			}
294 #endif
295 
296 			/* Get the actual amount of data transferred */
297 			data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
298 
299 			/*
300 			 * If the residual being reported by the adapter is
301 			 * greater than the residual being reported in the
302 			 * reply, then we have a true underrun.
303 			 */
304 			if (check_underrun &&
305 			    (pkt->pkt_data_resid > rsp_data_resid)) {
306 				switch (scsi_opcode) {
307 				case SCSI_INQUIRY:
308 					scsi_dl = scsi_cmd[16];
309 					break;
310 
311 				case SCSI_RX_DIAG:
312 					scsi_dl =
313 					    (scsi_cmd[15] * 0x100) +
314 					    scsi_cmd[16];
315 					break;
316 
317 				default:
318 					scsi_dl = pkt->pkt_datalen;
319 				}
320 
321 #ifdef FCP_UNDERRUN_PATCH1
322 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
323 				/*
324 				 * If status is not good and no data was
325 				 * actually transferred, then we must fix
326 				 * the issue
327 				 */
328 				if ((scsi_status != SCSI_STAT_GOOD) &&
329 				    (data_rx == 0)) {
330 					fix_it = 1;
331 
332 					EMLXS_MSGF(EMLXS_CONTEXT,
333 					    &emlxs_fcp_completion_error_msg,
334 					    "Underrun(1). Fixed. "
335 					    "did=0x%06x sbp=%p cmd=%02x "
336 					    "dl=%d,%d rx=%d rsp=%d",
337 					    did, sbp, scsi_opcode,
338 					    pkt->pkt_datalen, scsi_dl,
339 					    (pkt->pkt_datalen -
340 					    cmd->un.fcpi.fcpi_parm),
341 					    rsp_data_resid);
342 
343 				}
344 }
345 #endif /* FCP_UNDERRUN_PATCH1 */
346 
347 
348 #ifdef FCP_UNDERRUN_PATCH2
349 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
350 				if ((scsi_status == SCSI_STAT_GOOD)) {
351 					emlxs_msg_t	*msg;
352 
353 					msg = &emlxs_fcp_completion_error_msg;
354 					/*
355 					 * If status is good and this is an
356 					 * inquiry request and the amount of
357 					 * data
358 					 */
359 					/*
360 					 * requested <= data received, then we
361 					 * must fix the issue.
362 					 */
363 
364 					if ((scsi_opcode == SCSI_INQUIRY) &&
365 					    (pkt->pkt_datalen >= data_rx) &&
366 					    (scsi_dl <= data_rx)) {
367 						fix_it = 1;
368 
369 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
370 						    "Underrun(2). Fixed. "
371 						    "did=0x%06x sbp=%p "
372 						    "cmd=%02x dl=%d,%d "
373 						    "rx=%d rsp=%d",
374 						    did, sbp, scsi_opcode,
375 						    pkt->pkt_datalen, scsi_dl,
376 						    data_rx, rsp_data_resid);
377 
378 					}
379 
380 					/*
381 					 * If status is good and this is an
382 					 * inquiry request and the amount of
383 					 * data requested >= 128 bytes, but
384 					 * only 128 bytes were received,
385 					 * then we must fix the issue.
386 					 */
387 					else if ((scsi_opcode ==
388 					    SCSI_INQUIRY) &&
389 					    (pkt->pkt_datalen >= 128) &&
390 					    (scsi_dl >= 128) &&
391 					    (data_rx == 128)) {
392 						fix_it = 1;
393 
394 						EMLXS_MSGF(EMLXS_CONTEXT, msg,
395 						    "Underrun(3). Fixed. "
396 						    "did=0x%06x sbp=%p "
397 						    "cmd=%02x dl=%d,%d "
398 						    "rx=%d rsp=%d",
399 						    did, sbp, scsi_opcode,
400 						    pkt->pkt_datalen, scsi_dl,
401 						    data_rx, rsp_data_resid);
402 
403 					}
404 
405 				}
406 }
407 #endif /* FCP_UNDERRUN_PATCH2 */
408 
409 				/*
410 				 * Check if SCSI response payload should be
411 				 * fixed or if a DATA_UNDERRUN should be
412 				 * reported
413 				 */
414 				if (fix_it) {
415 					/*
416 					 * Fix the SCSI response payload itself
417 					 */
418 					rsp->fcp_u.fcp_status.resid_under = 1;
419 					rsp->fcp_resid =
420 					    LE_SWAP32(pkt->pkt_data_resid);
421 				} else {
422 					/*
423 					 * Change the status from
424 					 * IOSTAT_FCP_RSP_ERROR to
425 					 * IOSTAT_DATA_UNDERRUN
426 					 */
427 					iostat = IOSTAT_DATA_UNDERRUN;
428 					pkt->pkt_data_resid =
429 					    pkt->pkt_datalen;
430 				}
431 			}
432 
433 			/*
434 			 * If the residual being reported by the adapter is
435 			 * less than the residual being reported in the reply,
436 			 * then we have a true overrun. Since we don't know
437 			 * where the extra data came from or went to then we
438 			 * cannot trust anything we received
439 			 */
440 			else if (rsp_data_resid > pkt->pkt_data_resid) {
441 				/*
442 				 * Change the status from
443 				 * IOSTAT_FCP_RSP_ERROR to
444 				 * IOSTAT_DATA_OVERRUN
445 				 */
446 				iostat = IOSTAT_DATA_OVERRUN;
447 				pkt->pkt_data_resid = pkt->pkt_datalen;
448 			}
449 		} else {	/* pkt->pkt_datalen==0 or FC_PKT_FCP_WRITE */
450 
451 			/* Report whatever the target reported */
452 			pkt->pkt_data_resid = rsp_data_resid;
453 		}
454 	}
455 
456 	/* Print completion message */
457 	switch (iostat) {
458 	case IOSTAT_SUCCESS:
459 		/* Build SCSI GOOD status */
460 		if (pkt->pkt_rsplen) {
461 			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
462 		}
463 		break;
464 
465 	case IOSTAT_FCP_RSP_ERROR:
466 		break;
467 
468 	case IOSTAT_REMOTE_STOP:
469 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
470 		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
471 		    scsi_opcode);
472 		break;
473 
474 	case IOSTAT_LOCAL_REJECT:
475 		localstat = cmd->un.grsp.perr.statLocalError;
476 
477 		switch (localstat) {
478 		case IOERR_SEQUENCE_TIMEOUT:
479 			EMLXS_MSGF(EMLXS_CONTEXT,
480 			    &emlxs_fcp_completion_error_msg,
481 			    "Local reject. "
482 			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
483 			    emlxs_error_xlate(localstat), did, sbp,
484 			    scsi_opcode, pkt->pkt_timeout);
485 			break;
486 
487 		default:
488 			EMLXS_MSGF(EMLXS_CONTEXT,
489 			    &emlxs_fcp_completion_error_msg,
490 			    "Local reject. %s did=0x%06x sbp=%p cmd=%02x",
491 			    emlxs_error_xlate(localstat), did, sbp,
492 			    scsi_opcode);
493 		}
494 
495 		break;
496 
497 	case IOSTAT_NPORT_RJT:
498 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
499 		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
500 		    scsi_opcode);
501 		break;
502 
503 	case IOSTAT_FABRIC_RJT:
504 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
505 		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
506 		    scsi_opcode);
507 		break;
508 
509 	case IOSTAT_NPORT_BSY:
510 #ifdef SAN_DIAG_SUPPORT
511 		ndlp = (NODELIST *)iocbq->node;
512 		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
513 #endif
514 
515 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
516 		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
517 		    scsi_opcode);
518 		break;
519 
520 	case IOSTAT_FABRIC_BSY:
521 #ifdef SAN_DIAG_SUPPORT
522 		ndlp = (NODELIST *)iocbq->node;
523 		emlxs_log_sd_fc_bsy_event(port, NULL);
524 #endif
525 
526 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
527 		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
528 		    scsi_opcode);
529 		break;
530 
531 	case IOSTAT_INTERMED_RSP:
532 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
533 		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
534 		    sbp, scsi_opcode);
535 		break;
536 
537 	case IOSTAT_LS_RJT:
538 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
539 		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
540 		    scsi_opcode);
541 		break;
542 
543 	case IOSTAT_DATA_UNDERRUN:
544 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
545 		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
546 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
547 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
548 		    rsp_data_resid, scsi_status, sense, asc, ascq);
549 		break;
550 
551 	case IOSTAT_DATA_OVERRUN:
552 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
553 		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
554 		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
555 		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
556 		    rsp_data_resid, scsi_status, sense, asc, ascq);
557 		break;
558 
559 	default:
560 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
561 		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
562 		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
563 		    scsi_opcode);
564 		break;
565 	}
566 
567 done:
568 
569 	if (iostat == IOSTAT_SUCCESS) {
570 		HBASTATS.FcpGood++;
571 	} else {
572 		HBASTATS.FcpError++;
573 	}
574 
575 	mutex_exit(&sbp->mtx);
576 
577 	emlxs_pkt_complete(sbp, iostat, localstat, 0);
578 
579 	return;
580 
581 } /* emlxs_handle_fcp_event() */
582 
583 
584 
585 /*
586  *  emlxs_post_buffer
587  *
588  *  This routine will post count buffers to the
589  *  ring with the QUE_RING_BUF_CN command. This
590  *  allows 2 buffers / command to be posted.
591  *  Returns the number of buffers NOT posted.
592  */
593 /* SLI3 */
594 extern int
595 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
596 {
597 	emlxs_port_t *port = &PPORT;
598 	IOCB *icmd;
599 	IOCBQ *iocbq;
600 	MATCHMAP *mp;
601 	uint16_t tag;
602 	uint32_t maxqbuf;
603 	int32_t i;
604 	int32_t j;
605 	uint32_t seg;
606 	uint32_t size;
607 
608 	mp = 0;
609 	maxqbuf = 2;
610 	tag = (uint16_t)cnt;
611 	cnt += rp->fc_missbufcnt;
612 
613 	if (rp->ringno == hba->channel_els) {
614 		seg = MEM_BUF;
615 		size = MEM_ELSBUF_SIZE;
616 	} else if (rp->ringno == hba->channel_ip) {
617 		seg = MEM_IPBUF;
618 		size = MEM_IPBUF_SIZE;
619 	} else if (rp->ringno == hba->channel_ct) {
620 		seg = MEM_CTBUF;
621 		size = MEM_CTBUF_SIZE;
622 	}
623 #ifdef SFCT_SUPPORT
624 	else if (rp->ringno == hba->CHANNEL_FCT) {
625 		seg = MEM_FCTBUF;
626 		size = MEM_FCTBUF_SIZE;
627 	}
628 #endif /* SFCT_SUPPORT */
629 	else {
630 		return (0);
631 	}
632 
633 	/*
634 	 * While there are buffers to post
635 	 */
636 	while (cnt) {
637 		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == 0) {
638 			rp->fc_missbufcnt = cnt;
639 			return (cnt);
640 		}
641 
642 		iocbq->channel = (void *)&hba->chan[rp->ringno];
643 		iocbq->port = (void *)port;
644 		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
645 
646 		icmd = &iocbq->iocb;
647 
648 		/*
649 		 * Max buffers can be posted per command
650 		 */
651 		for (i = 0; i < maxqbuf; i++) {
652 			if (cnt <= 0)
653 				break;
654 
655 			/* fill in BDEs for command */
656 			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg, 1))
657 			    == 0) {
658 				icmd->ULPBDECOUNT = i;
659 				for (j = 0; j < i; j++) {
660 					mp = EMLXS_GET_VADDR(hba, rp, icmd);
661 					if (mp) {
662 						(void) emlxs_mem_put(hba, seg,
663 						    (uint8_t *)mp);
664 					}
665 				}
666 
667 				rp->fc_missbufcnt = cnt + i;
668 
669 				(void) emlxs_mem_put(hba, MEM_IOCB,
670 				    (uint8_t *)iocbq);
671 
672 				return (cnt + i);
673 			}
674 
675 			/*
676 			 * map that page and save the address pair for lookup
677 			 * later
678 			 */
679 			emlxs_mem_map_vaddr(hba,
680 			    rp,
681 			    mp,
682 			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
683 			    (uint32_t *)&icmd->un.cont64[i].addrLow);
684 
685 			icmd->un.cont64[i].tus.f.bdeSize = size;
686 			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
687 
688 			/*
689 			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
690 			 *    "UB Post: ring=%d addr=%08x%08x size=%d",
691 			 *    rp->ringno, icmd->un.cont64[i].addrHigh,
692 			 *    icmd->un.cont64[i].addrLow, size);
693 			 */
694 
695 			cnt--;
696 		}
697 
698 		icmd->ULPIOTAG = tag;
699 		icmd->ULPBDECOUNT = i;
700 		icmd->ULPLE = 1;
701 		icmd->ULPOWNER = OWN_CHIP;
702 		/* used for delimiter between commands */
703 		iocbq->bp = (uint8_t *)mp;
704 
705 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
706 	}
707 
708 	rp->fc_missbufcnt = 0;
709 
710 	return (0);
711 
712 } /* emlxs_post_buffer() */
713 
714 
715 extern int
716 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
717 {
718 	emlxs_hba_t *hba = HBA;
719 	emlxs_config_t *cfg;
720 	NODELIST *nlp;
721 	fc_affected_id_t *aid;
722 	uint32_t mask;
723 	uint32_t aff_d_id;
724 	uint32_t linkdown;
725 	uint32_t vlinkdown;
726 	uint32_t action;
727 	int i;
728 	uint32_t unreg_vpi;
729 	uint32_t update;
730 	uint32_t adisc_support;
731 	uint8_t format;
732 
733 	/* Target mode only uses this routine for linkdowns */
734 	if (port->tgt_mode && (scope != 0xffffffff) && (scope != 0xfeffffff)) {
735 		return (0);
736 	}
737 
738 	cfg = &CFG;
739 	aid = (fc_affected_id_t *)&scope;
740 	linkdown = 0;
741 	vlinkdown = 0;
742 	unreg_vpi = 0;
743 	update = 0;
744 
745 	if (!(port->flag & EMLXS_PORT_BOUND)) {
746 		return (0);
747 	}
748 
749 	format = aid->aff_format;
750 
751 	switch (format) {
752 	case 0:	/* Port */
753 		mask = 0x00ffffff;
754 		break;
755 
756 	case 1:	/* Area */
757 		mask = 0x00ffff00;
758 		break;
759 
760 	case 2:	/* Domain */
761 		mask = 0x00ff0000;
762 		break;
763 
764 	case 3:	/* Network */
765 		mask = 0x00000000;
766 		break;
767 
768 #ifdef DHCHAP_SUPPORT
769 	case 0xfe:	/* Virtual link down */
770 		mask = 0x00000000;
771 		vlinkdown = 1;
772 		break;
773 #endif /* DHCHAP_SUPPORT */
774 
775 	case 0xff:	/* link is down */
776 		mask = 0x00000000;
777 		linkdown = 1;
778 		break;
779 
780 	}
781 
782 	aff_d_id = aid->aff_d_id & mask;
783 
784 
785 	/*
786 	 * If link is down then this is a hard shutdown and flush
787 	 * If link not down then this is a soft shutdown and flush
788 	 * (e.g. RSCN)
789 	 */
790 	if (linkdown) {
791 		mutex_enter(&EMLXS_PORT_LOCK);
792 
793 		port->flag &= EMLXS_PORT_LINKDOWN_MASK;
794 		port->prev_did = port->did;
795 		port->did = 0;
796 
797 		if (port->ulp_statec != FC_STATE_OFFLINE) {
798 			port->ulp_statec = FC_STATE_OFFLINE;
799 			update = 1;
800 		}
801 
802 		mutex_exit(&EMLXS_PORT_LOCK);
803 
804 		/* Tell ULP about it */
805 		if (update) {
806 			if (port->flag & EMLXS_PORT_BOUND) {
807 				if (port->vpi == 0) {
808 					EMLXS_MSGF(EMLXS_CONTEXT,
809 					    &emlxs_link_down_msg, NULL);
810 				}
811 
812 				if (port->ini_mode) {
813 					port->ulp_statec_cb(port->ulp_handle,
814 					    FC_STATE_OFFLINE);
815 				}
816 #ifdef SFCT_SUPPORT
817 				else if (port->tgt_mode) {
818 					emlxs_fct_link_down(port);
819 				}
820 #endif /* SFCT_SUPPORT */
821 
822 			} else {
823 				if (port->vpi == 0) {
824 					EMLXS_MSGF(EMLXS_CONTEXT,
825 					    &emlxs_link_down_msg, "*");
826 				}
827 			}
828 
829 
830 		}
831 
832 		unreg_vpi = 1;
833 
834 #ifdef DHCHAP_SUPPORT
835 		/* Stop authentication with all nodes */
836 		emlxs_dhc_auth_stop(port, NULL);
837 #endif /* DHCHAP_SUPPORT */
838 
839 		/* Flush the base node */
840 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
841 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
842 
843 		/* Flush any pending ub buffers */
844 		emlxs_ub_flush(port);
845 	}
846 #ifdef DHCHAP_SUPPORT
847 	/* virtual link down */
848 	else if (vlinkdown) {
849 		mutex_enter(&EMLXS_PORT_LOCK);
850 
851 		if (port->ulp_statec != FC_STATE_OFFLINE) {
852 			port->ulp_statec = FC_STATE_OFFLINE;
853 			update = 1;
854 		}
855 
856 		mutex_exit(&EMLXS_PORT_LOCK);
857 
858 		/* Tell ULP about it */
859 		if (update) {
860 			if (port->flag & EMLXS_PORT_BOUND) {
861 				if (port->vpi == 0) {
862 					EMLXS_MSGF(EMLXS_CONTEXT,
863 					    &emlxs_link_down_msg,
864 					    "Switch authentication failed.");
865 				}
866 
867 #ifdef SFCT_SUPPORT
868 				if (port->tgt_mode) {
869 					emlxs_fct_link_down(port);
870 
871 				} else if (port->ini_mode) {
872 					port->ulp_statec_cb(port->ulp_handle,
873 					    FC_STATE_OFFLINE);
874 				}
875 #else
876 				port->ulp_statec_cb(port->ulp_handle,
877 				    FC_STATE_OFFLINE);
878 #endif	/* SFCT_SUPPORT */
879 			} else {
880 				if (port->vpi == 0) {
881 					EMLXS_MSGF(EMLXS_CONTEXT,
882 					    &emlxs_link_down_msg,
883 					    "Switch authentication failed. *");
884 				}
885 			}
886 
887 
888 		}
889 
890 		/* Flush the base node */
891 		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
892 		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
893 	}
894 #endif /* DHCHAP_SUPPORT */
895 
896 	if (port->tgt_mode) {
897 		goto done;
898 	}
899 
900 	/* Set the node tags */
901 	/* We will process all nodes with this tag */
902 	rw_enter(&port->node_rwlock, RW_READER);
903 	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
904 		nlp = port->node_table[i];
905 		while (nlp != NULL) {
906 			nlp->nlp_tag = 1;
907 			nlp = nlp->nlp_list_next;
908 		}
909 	}
910 	rw_exit(&port->node_rwlock);
911 
912 	if (hba->flag & FC_ONLINE_MODE) {
913 		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
914 	} else {
915 		adisc_support = 0;
916 	}
917 
918 	/* Check ADISC support level */
919 	switch (adisc_support) {
920 	case 0:	/* No support - Flush all IO to all matching nodes */
921 
922 		for (;;) {
923 			/*
924 			 * We need to hold the locks this way because
925 			 * emlxs_mb_unreg_did and the flush routines enter the
926 			 * same locks. Also, when we release the lock the list
927 			 * can change out from under us.
928 			 */
929 
930 			/* Find first node */
931 			rw_enter(&port->node_rwlock, RW_READER);
932 			action = 0;
933 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
934 				nlp = port->node_table[i];
935 				while (nlp != NULL) {
936 					if (!nlp->nlp_tag) {
937 						nlp = nlp->nlp_list_next;
938 						continue;
939 					}
940 					nlp->nlp_tag = 0;
941 
942 					/*
943 					 * Check for any device that matches
944 					 * our mask
945 					 */
946 					if ((nlp->nlp_DID & mask) == aff_d_id) {
947 						if (linkdown) {
948 							action = 1;
949 							break;
950 						} else { /* Must be an RCSN */
951 
952 							action = 2;
953 							break;
954 						}
955 					}
956 					nlp = nlp->nlp_list_next;
957 				}
958 
959 				if (action) {
960 					break;
961 				}
962 			}
963 			rw_exit(&port->node_rwlock);
964 
965 
966 			/* Check if nothing was found */
967 			if (action == 0) {
968 				break;
969 			} else if (action == 1) {
970 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
971 				    NULL, NULL, NULL);
972 			} else if (action == 2) {
973 #ifdef DHCHAP_SUPPORT
974 				emlxs_dhc_auth_stop(port, nlp);
975 #endif /* DHCHAP_SUPPORT */
976 
977 				/*
978 				 * Close the node for any further normal IO
979 				 * A PLOGI with reopen the node
980 				 */
981 				emlxs_node_close(port, nlp,
982 				    hba->channel_fcp, 60);
983 				emlxs_node_close(port, nlp,
984 				    hba->channel_ip, 60);
985 
986 				/* Flush tx queue */
987 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
988 
989 				/* Flush chip queue */
990 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
991 			}
992 
993 		}
994 
995 		break;
996 
997 	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */
998 
999 		for (;;) {
1000 
1001 			/*
1002 			 * We need to hold the locks this way because
1003 			 * emlxs_mb_unreg_did and the flush routines enter the
1004 			 * same locks. Also, when we release the lock the list
1005 			 * can change out from under us.
1006 			 */
1007 			rw_enter(&port->node_rwlock, RW_READER);
1008 			action = 0;
1009 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1010 				nlp = port->node_table[i];
1011 				while (nlp != NULL) {
1012 					if (!nlp->nlp_tag) {
1013 						nlp = nlp->nlp_list_next;
1014 						continue;
1015 					}
1016 					nlp->nlp_tag = 0;
1017 
1018 					/*
1019 					 * Check for special FCP2 target device
1020 					 * that matches our mask
1021 					 */
1022 					if ((nlp->nlp_fcp_info &
1023 					    NLP_FCP_TGT_DEVICE) &&
1024 					    (nlp-> nlp_fcp_info &
1025 					    NLP_FCP_2_DEVICE) &&
1026 					    (nlp->nlp_DID & mask) ==
1027 					    aff_d_id) {
1028 						action = 3;
1029 						break;
1030 					}
1031 
1032 					/*
1033 					 * Check for any other device that
1034 					 * matches our mask
1035 					 */
1036 					else if ((nlp->nlp_DID & mask) ==
1037 					    aff_d_id) {
1038 						if (linkdown) {
1039 							action = 1;
1040 							break;
1041 						} else { /* Must be an RSCN */
1042 
1043 							action = 2;
1044 							break;
1045 						}
1046 					}
1047 
1048 					nlp = nlp->nlp_list_next;
1049 				}
1050 
1051 				if (action) {
1052 					break;
1053 				}
1054 			}
1055 			rw_exit(&port->node_rwlock);
1056 
1057 			/* Check if nothing was found */
1058 			if (action == 0) {
1059 				break;
1060 			} else if (action == 1) {
1061 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1062 				    NULL, NULL, NULL);
1063 			} else if (action == 2) {
1064 #ifdef DHCHAP_SUPPORT
1065 				emlxs_dhc_auth_stop(port, nlp);
1066 #endif /* DHCHAP_SUPPORT */
1067 
1068 				/*
1069 				 * Close the node for any further normal IO
1070 				 * A PLOGI with reopen the node
1071 				 */
1072 				emlxs_node_close(port, nlp,
1073 				    hba->channel_fcp, 60);
1074 				emlxs_node_close(port, nlp,
1075 				    hba->channel_ip, 60);
1076 
1077 				/* Flush tx queue */
1078 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1079 
1080 				/* Flush chip queue */
1081 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1082 
1083 			} else if (action == 3) {	/* FCP2 devices */
1084 				unreg_vpi = 0;
1085 
1086 #ifdef DHCHAP_SUPPORT
1087 				emlxs_dhc_auth_stop(port, nlp);
1088 #endif /* DHCHAP_SUPPORT */
1089 
1090 				/*
1091 				 * Close the node for any further normal IO
1092 				 * An ADISC or a PLOGI with reopen the node
1093 				 */
1094 				emlxs_node_close(port, nlp,
1095 				    hba->channel_fcp, -1);
1096 				emlxs_node_close(port, nlp, hba->channel_ip,
1097 				    ((linkdown) ? 0 : 60));
1098 
1099 				/* Flush tx queues except for FCP ring */
1100 				(void) emlxs_tx_node_flush(port, nlp,
1101 				    &hba->chan[hba->channel_ct], 0, 0);
1102 				(void) emlxs_tx_node_flush(port, nlp,
1103 				    &hba->chan[hba->channel_els], 0, 0);
1104 				(void) emlxs_tx_node_flush(port, nlp,
1105 				    &hba->chan[hba->channel_ip], 0, 0);
1106 
1107 				/* Flush chip queues except for FCP ring */
1108 				(void) emlxs_chipq_node_flush(port,
1109 				    &hba->chan[hba->channel_ct], nlp, 0);
1110 				(void) emlxs_chipq_node_flush(port,
1111 				    &hba->chan[hba->channel_els], nlp, 0);
1112 				(void) emlxs_chipq_node_flush(port,
1113 				    &hba->chan[hba->channel_ip], nlp, 0);
1114 			}
1115 		}
1116 		break;
1117 
1118 	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */
1119 
1120 		if (!linkdown && !vlinkdown) {
1121 			break;
1122 		}
1123 
1124 		for (;;) {
1125 			/*
1126 			 * We need to hold the locks this way because
1127 			 * emlxs_mb_unreg_did and the flush routines enter the
1128 			 * same locks. Also, when we release the lock the list
1129 			 * can change out from under us.
1130 			 */
1131 			rw_enter(&port->node_rwlock, RW_READER);
1132 			action = 0;
1133 			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1134 				nlp = port->node_table[i];
1135 				while (nlp != NULL) {
1136 					if (!nlp->nlp_tag) {
1137 						nlp = nlp->nlp_list_next;
1138 						continue;
1139 					}
1140 					nlp->nlp_tag = 0;
1141 
1142 					/*
1143 					 * Check for FCP target device that
1144 					 * matches our mask
1145 					 */
1146 					if ((nlp-> nlp_fcp_info &
1147 					    NLP_FCP_TGT_DEVICE) &&
1148 					    (nlp->nlp_DID & mask) ==
1149 					    aff_d_id) {
1150 						action = 3;
1151 						break;
1152 					}
1153 
1154 					/*
1155 					 * Check for any other device that
1156 					 * matches our mask
1157 					 */
1158 					else if ((nlp->nlp_DID & mask) ==
1159 					    aff_d_id) {
1160 						if (linkdown) {
1161 							action = 1;
1162 							break;
1163 						} else { /* Must be an RSCN */
1164 
1165 							action = 2;
1166 							break;
1167 						}
1168 					}
1169 
1170 					nlp = nlp->nlp_list_next;
1171 				}
1172 				if (action) {
1173 					break;
1174 				}
1175 			}
1176 			rw_exit(&port->node_rwlock);
1177 
1178 			/* Check if nothing was found */
1179 			if (action == 0) {
1180 				break;
1181 			} else if (action == 1) {
1182 				(void) emlxs_mb_unreg_did(port, nlp->nlp_DID,
1183 				    NULL, NULL, NULL);
1184 			} else if (action == 2) {
1185 				/*
1186 				 * Close the node for any further normal IO
1187 				 * A PLOGI with reopen the node
1188 				 */
1189 				emlxs_node_close(port, nlp,
1190 				    hba->channel_fcp, 60);
1191 				emlxs_node_close(port, nlp,
1192 				    hba->channel_ip, 60);
1193 
1194 				/* Flush tx queue */
1195 				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1196 
1197 				/* Flush chip queue */
1198 				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1199 
1200 			} else if (action == 3) {	/* FCP2 devices */
1201 				unreg_vpi = 0;
1202 
1203 				/*
1204 				 * Close the node for any further normal IO
1205 				 * An ADISC or a PLOGI with reopen the node
1206 				 */
1207 				emlxs_node_close(port, nlp,
1208 				    hba->channel_fcp, -1);
1209 				emlxs_node_close(port, nlp, hba->channel_ip,
1210 				    ((linkdown) ? 0 : 60));
1211 
1212 				/* Flush tx queues except for FCP ring */
1213 				(void) emlxs_tx_node_flush(port, nlp,
1214 				    &hba->chan[hba->channel_ct], 0, 0);
1215 				(void) emlxs_tx_node_flush(port, nlp,
1216 				    &hba->chan[hba->channel_els], 0, 0);
1217 				(void) emlxs_tx_node_flush(port, nlp,
1218 				    &hba->chan[hba->channel_ip], 0, 0);
1219 
1220 				/* Flush chip queues except for FCP ring */
1221 				(void) emlxs_chipq_node_flush(port,
1222 				    &hba->chan[hba->channel_ct], nlp, 0);
1223 				(void) emlxs_chipq_node_flush(port,
1224 				    &hba->chan[hba->channel_els], nlp, 0);
1225 				(void) emlxs_chipq_node_flush(port,
1226 				    &hba->chan[hba->channel_ip], nlp, 0);
1227 			}
1228 		}
1229 
1230 		break;
1231 
1232 	}	/* switch() */
1233 
1234 done:
1235 
1236 	if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
1237 		if (unreg_vpi) {
1238 			(void) emlxs_mb_unreg_vpi(port);
1239 		}
1240 	}
1241 
1242 	return (0);
1243 
1244 } /* emlxs_port_offline() */
1245 
1246 
1247 extern void
1248 emlxs_port_online(emlxs_port_t *vport)
1249 {
1250 	emlxs_hba_t *hba = vport->hba;
1251 	emlxs_port_t *port = &PPORT;
1252 	uint32_t state;
1253 	uint32_t update;
1254 	uint32_t npiv_linkup;
1255 	char topology[32];
1256 	char linkspeed[32];
1257 	char mode[32];
1258 
1259 	/*
1260 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1261 	 *    "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1262 	 */
1263 
1264 	if ((vport->vpi > 0) &&
1265 	    (!(hba->flag & FC_NPIV_ENABLED) ||
1266 	    !(hba->flag & FC_NPIV_SUPPORTED))) {
1267 		return;
1268 	}
1269 
1270 	if (!(vport->flag & EMLXS_PORT_BOUND) ||
1271 	    !(vport->flag & EMLXS_PORT_ENABLE)) {
1272 		return;
1273 	}
1274 
1275 	mutex_enter(&EMLXS_PORT_LOCK);
1276 
1277 	/* Check for mode */
1278 	if (port->tgt_mode) {
1279 		(void) strcpy(mode, ", target");
1280 	} else if (port->ini_mode) {
1281 		(void) strcpy(mode, ", initiator");
1282 	} else {
1283 		(void) strcpy(mode, "");
1284 	}
1285 
1286 	/* Check for loop topology */
1287 	if (hba->topology == TOPOLOGY_LOOP) {
1288 		state = FC_STATE_LOOP;
1289 		(void) strcpy(topology, ", loop");
1290 	} else {
1291 		state = FC_STATE_ONLINE;
1292 		(void) strcpy(topology, ", fabric");
1293 	}
1294 
1295 	/* Set the link speed */
1296 	switch (hba->linkspeed) {
1297 	case 0:
1298 		(void) strcpy(linkspeed, "Gb");
1299 		state |= FC_STATE_1GBIT_SPEED;
1300 		break;
1301 
1302 	case LA_1GHZ_LINK:
1303 		(void) strcpy(linkspeed, "1Gb");
1304 		state |= FC_STATE_1GBIT_SPEED;
1305 		break;
1306 	case LA_2GHZ_LINK:
1307 		(void) strcpy(linkspeed, "2Gb");
1308 		state |= FC_STATE_2GBIT_SPEED;
1309 		break;
1310 	case LA_4GHZ_LINK:
1311 		(void) strcpy(linkspeed, "4Gb");
1312 		state |= FC_STATE_4GBIT_SPEED;
1313 		break;
1314 	case LA_8GHZ_LINK:
1315 		(void) strcpy(linkspeed, "8Gb");
1316 		state |= FC_STATE_8GBIT_SPEED;
1317 		break;
1318 	case LA_10GHZ_LINK:
1319 		(void) strcpy(linkspeed, "10Gb");
1320 		state |= FC_STATE_10GBIT_SPEED;
1321 		break;
1322 	default:
1323 		(void) sprintf(linkspeed, "unknown(0x%x)", hba->linkspeed);
1324 		break;
1325 	}
1326 
1327 	npiv_linkup = 0;
1328 	update = 0;
1329 
1330 	if ((hba->state >= FC_LINK_UP) &&
1331 	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1332 		update = 1;
1333 		vport->ulp_statec = state;
1334 
1335 		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1336 			hba->flag |= FC_NPIV_LINKUP;
1337 			npiv_linkup = 1;
1338 		}
1339 	}
1340 
1341 	mutex_exit(&EMLXS_PORT_LOCK);
1342 
1343 
1344 	/*
1345 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1346 	 *    "linkup_callback: update=%d vpi=%d flag=%d fc_flag=%x state=%x"
1347 	 *    "statec=%x", update, vport->vpi, npiv_linkup, hba->flag,
1348 	 *    hba->state, vport->ulp_statec);
1349 	 */
1350 
1351 	if (update) {
1352 		if (vport->flag & EMLXS_PORT_BOUND) {
1353 			if (vport->vpi == 0) {
1354 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1355 				    "%s%s%s", linkspeed, topology, mode);
1356 
1357 			} else if (npiv_linkup) {
1358 				EMLXS_MSGF(EMLXS_CONTEXT,
1359 				    &emlxs_npiv_link_up_msg, "%s%s%s",
1360 				    linkspeed, topology, mode);
1361 			}
1362 
1363 			if (vport->ini_mode) {
1364 				vport->ulp_statec_cb(vport->ulp_handle,
1365 				    state);
1366 			}
1367 #ifdef SFCT_SUPPORT
1368 			else if (vport->tgt_mode) {
1369 				emlxs_fct_link_up(vport);
1370 			}
1371 #endif /* SFCT_SUPPORT */
1372 		} else {
1373 			if (vport->vpi == 0) {
1374 				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1375 				    "%s%s%s *", linkspeed, topology, mode);
1376 
1377 			} else if (npiv_linkup) {
1378 				EMLXS_MSGF(EMLXS_CONTEXT,
1379 				    &emlxs_npiv_link_up_msg, "%s%s%s *",
1380 				    linkspeed, topology, mode);
1381 			}
1382 		}
1383 
1384 		/* Check for waiting threads */
1385 		if (vport->vpi == 0) {
1386 			mutex_enter(&EMLXS_LINKUP_LOCK);
1387 			if (hba->linkup_wait_flag == TRUE) {
1388 				hba->linkup_wait_flag = FALSE;
1389 				cv_broadcast(&EMLXS_LINKUP_CV);
1390 			}
1391 			mutex_exit(&EMLXS_LINKUP_LOCK);
1392 		}
1393 
1394 		/* Flush any pending ub buffers */
1395 		emlxs_ub_flush(vport);
1396 	}
1397 
1398 	return;
1399 
1400 } /* emlxs_port_online() */
1401 
1402 
1403 extern void
1404 emlxs_linkdown(emlxs_hba_t *hba)
1405 {
1406 	emlxs_port_t *port = &PPORT;
1407 	RPIobj_t *rp;
1408 	int i;
1409 
1410 	mutex_enter(&EMLXS_PORT_LOCK);
1411 
1412 	if (hba->state > FC_LINK_DOWN) {
1413 		HBASTATS.LinkDown++;
1414 		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1415 	}
1416 
1417 	/* Filter hba flags */
1418 	hba->flag &= FC_LINKDOWN_MASK;
1419 	hba->discovery_timer = 0;
1420 	hba->linkup_timer = 0;
1421 
1422 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1423 		rp = hba->sli.sli4.RPIp;
1424 		for (i = 0; i < hba->sli.sli4.RPICount; i++) {
1425 			if (rp->state & RESOURCE_ALLOCATED) {
1426 				rp->state |= RESOURCE_RPI_PAUSED;
1427 			}
1428 			rp++;
1429 		}
1430 	}
1431 
1432 	mutex_exit(&EMLXS_PORT_LOCK);
1433 
1434 	for (i = 0; i < MAX_VPORTS; i++) {
1435 		port = &VPORT(i);
1436 
1437 		if (!(port->flag & EMLXS_PORT_BOUND)) {
1438 			continue;
1439 		}
1440 
1441 		(void) emlxs_port_offline(port, 0xffffffff);
1442 
1443 	}
1444 
1445 	return;
1446 
1447 } /* emlxs_linkdown() */
1448 
1449 
1450 extern void
1451 emlxs_linkup(emlxs_hba_t *hba)
1452 {
1453 	emlxs_port_t *port = &PPORT;
1454 	emlxs_config_t *cfg = &CFG;
1455 
1456 	mutex_enter(&EMLXS_PORT_LOCK);
1457 
1458 	HBASTATS.LinkUp++;
1459 	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1460 
1461 #ifdef MENLO_SUPPORT
1462 	if (hba->flag & FC_MENLO_MODE) {
1463 		mutex_exit(&EMLXS_PORT_LOCK);
1464 
1465 		/*
1466 		 * Trigger linkup CV and don't start linkup & discovery
1467 		 * timers
1468 		 */
1469 		mutex_enter(&EMLXS_LINKUP_LOCK);
1470 		cv_broadcast(&EMLXS_LINKUP_CV);
1471 		mutex_exit(&EMLXS_LINKUP_LOCK);
1472 
1473 		return;
1474 	}
1475 #endif /* MENLO_SUPPORT */
1476 
1477 	/* Set the linkup & discovery timers */
1478 	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1479 	hba->discovery_timer =
1480 	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1481 	    cfg[CFG_DISC_TIMEOUT].current;
1482 
1483 	mutex_exit(&EMLXS_PORT_LOCK);
1484 
1485 	return;
1486 
1487 } /* emlxs_linkup() */
1488 
1489 
1490 /*
1491  *  emlxs_reset_link
1492  *
1493  *  Description:
1494  *  Called to reset the link with an init_link
1495  *
1496  *    Returns:
1497  *
1498  */
1499 extern int
1500 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1501 {
1502 	emlxs_port_t *port = &PPORT;
1503 	emlxs_config_t *cfg;
1504 	MAILBOXQ *mbq = NULL;
1505 	MAILBOX *mb = NULL;
1506 	int rval = 0;
1507 	int rc;
1508 
1509 	/*
1510 	 * Get a buffer to use for the mailbox command
1511 	 */
1512 	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1513 	    == NULL) {
1514 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1515 		    "Unable to allocate mailbox buffer.");
1516 		rval = 1;
1517 		goto reset_link_fail;
1518 	}
1519 
1520 	mb = (MAILBOX *)mbq;
1521 
1522 	/* Bring link down first */
1523 	emlxs_mb_down_link(hba, mbq);
1524 
1525 #define	MBXERR_LINK_DOWN	0x33
1526 
1527 	if (wait) {
1528 		wait = MBX_WAIT;
1529 	} else {
1530 		wait = MBX_NOWAIT;
1531 	}
1532 	rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1533 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1534 	    (rc != MBXERR_LINK_DOWN)) {
1535 		rval = 1;
1536 		goto reset_link_fail;
1537 	}
1538 
1539 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1540 	    "Disabling link...");
1541 
1542 	if (linkup) {
1543 		/*
1544 		 * Setup and issue mailbox INITIALIZE LINK command
1545 		 */
1546 
1547 		if (wait == MBX_NOWAIT) {
1548 			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))
1549 			    == NULL) {
1550 				EMLXS_MSGF(EMLXS_CONTEXT,
1551 				    &emlxs_link_reset_failed_msg,
1552 				    "Unable to allocate mailbox buffer.");
1553 				rval = 1;
1554 				goto reset_link_fail;
1555 			}
1556 			mb = (MAILBOX *)mbq;
1557 		} else {
1558 			/* Reuse mbq from previous mbox */
1559 			mb = (MAILBOX *)mbq;
1560 		}
1561 		cfg = &CFG;
1562 
1563 		emlxs_mb_init_link(hba, mbq,
1564 		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1565 
1566 		mb->un.varInitLnk.lipsr_AL_PA = 0;
1567 
1568 		/* Clear the loopback mode */
1569 		mutex_enter(&EMLXS_PORT_LOCK);
1570 		hba->flag &= ~FC_LOOPBACK_MODE;
1571 		hba->loopback_tics = 0;
1572 		mutex_exit(&EMLXS_PORT_LOCK);
1573 
1574 		rc =  EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1575 		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1576 			rval = 1;
1577 			goto reset_link_fail;
1578 		}
1579 
1580 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1581 	}
1582 
1583 reset_link_fail:
1584 
1585 	if ((wait == MBX_WAIT) && mbq) {
1586 		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
1587 	}
1588 
1589 	return (rval);
1590 } /* emlxs_reset_link() */
1591 
1592 
1593 extern int
1594 emlxs_online(emlxs_hba_t *hba)
1595 {
1596 	emlxs_port_t *port = &PPORT;
1597 	int32_t rval = 0;
1598 	uint32_t i = 0;
1599 
1600 	/* Make sure adapter is offline or exit trying (30 seconds) */
1601 	while (i++ < 30) {
1602 		/* Check if adapter is already going online */
1603 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1604 			return (0);
1605 		}
1606 
1607 		mutex_enter(&EMLXS_PORT_LOCK);
1608 
1609 		/* Check again */
1610 		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1611 			mutex_exit(&EMLXS_PORT_LOCK);
1612 			return (0);
1613 		}
1614 
1615 		/* Check if adapter is offline */
1616 		if (hba->flag & FC_OFFLINE_MODE) {
1617 			/* Mark it going online */
1618 			hba->flag &= ~FC_OFFLINE_MODE;
1619 			hba->flag |= FC_ONLINING_MODE;
1620 
1621 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1622 			mutex_exit(&EMLXS_PORT_LOCK);
1623 			break;
1624 		}
1625 
1626 		mutex_exit(&EMLXS_PORT_LOCK);
1627 
1628 		DELAYMS(1000);
1629 	}
1630 
1631 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1632 	    "Going online...");
1633 
1634 	if (rval = EMLXS_SLI_ONLINE(hba)) {
1635 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1636 		    rval);
1637 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1638 
1639 		/* Set FC_OFFLINE_MODE */
1640 		mutex_enter(&EMLXS_PORT_LOCK);
1641 		emlxs_diag_state = DDI_OFFDI;
1642 		hba->flag |= FC_OFFLINE_MODE;
1643 		hba->flag &= ~FC_ONLINING_MODE;
1644 		mutex_exit(&EMLXS_PORT_LOCK);
1645 
1646 		return (rval);
1647 	}
1648 
1649 	/* Start the timer */
1650 	emlxs_timer_start(hba);
1651 
1652 	/* Set FC_ONLINE_MODE */
1653 	mutex_enter(&EMLXS_PORT_LOCK);
1654 	emlxs_diag_state = DDI_ONDI;
1655 	hba->flag |= FC_ONLINE_MODE;
1656 	hba->flag &= ~FC_ONLINING_MODE;
1657 	mutex_exit(&EMLXS_PORT_LOCK);
1658 
1659 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1660 
1661 #ifdef SFCT_SUPPORT
1662 	(void) emlxs_fct_port_initialize(port);
1663 #endif /* SFCT_SUPPORT */
1664 
1665 	return (rval);
1666 
1667 } /* emlxs_online() */
1668 
1669 
1670 extern int
1671 emlxs_offline(emlxs_hba_t *hba)
1672 {
1673 	emlxs_port_t *port = &PPORT;
1674 	uint32_t i = 0;
1675 	int rval = 1;
1676 
1677 	/* Make sure adapter is online or exit trying (30 seconds) */
1678 	while (i++ < 30) {
1679 		/* Check if adapter is already going offline */
1680 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1681 			return (0);
1682 		}
1683 
1684 		mutex_enter(&EMLXS_PORT_LOCK);
1685 
1686 		/* Check again */
1687 		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1688 			mutex_exit(&EMLXS_PORT_LOCK);
1689 			return (0);
1690 		}
1691 
1692 		/* Check if adapter is online */
1693 		if (hba->flag & FC_ONLINE_MODE) {
1694 			/* Mark it going offline */
1695 			hba->flag &= ~FC_ONLINE_MODE;
1696 			hba->flag |= FC_OFFLINING_MODE;
1697 
1698 			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1699 			mutex_exit(&EMLXS_PORT_LOCK);
1700 			break;
1701 		}
1702 
1703 		mutex_exit(&EMLXS_PORT_LOCK);
1704 
1705 		DELAYMS(1000);
1706 	}
1707 
1708 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1709 	    "Going offline...");
1710 
1711 	if (port->ini_mode) {
1712 		/* Flush all IO */
1713 		emlxs_linkdown(hba);
1714 	}
1715 #ifdef SFCT_SUPPORT
1716 	else {
1717 		(void) emlxs_fct_port_shutdown(port);
1718 	}
1719 #endif /* SFCT_SUPPORT */
1720 
1721 	/* Check if adapter was shutdown */
1722 	if (hba->flag & FC_HARDWARE_ERROR) {
1723 		/*
1724 		 * Force mailbox cleanup
1725 		 * This will wake any sleeping or polling threads
1726 		 */
1727 		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1728 	}
1729 
1730 	/* Pause here for the IO to settle */
1731 	delay(drv_usectohz(1000000));	/* 1 sec */
1732 
1733 	/* Unregister all nodes */
1734 	emlxs_ffcleanup(hba);
1735 
1736 	if (hba->bus_type == SBUS_FC) {
1737 		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1738 #ifdef FMA_SUPPORT
1739 		/* Access handle validation */
1740 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
1741 #endif  /* FMA_SUPPORT */
1742 	}
1743 
1744 	/* Stop the timer */
1745 	emlxs_timer_stop(hba);
1746 
1747 	/* For safety flush every iotag list */
1748 	if (emlxs_iotag_flush(hba)) {
1749 		/* Pause here for the IO to flush */
1750 		delay(drv_usectohz(1000));
1751 	}
1752 
1753 	/* Wait for poll command request to settle */
1754 	while (hba->io_poll_count > 0) {
1755 		delay(drv_usectohz(2000000));   /* 2 sec */
1756 	}
1757 
1758 	/* Shutdown the adapter interface */
1759 	EMLXS_SLI_OFFLINE(hba);
1760 
1761 	mutex_enter(&EMLXS_PORT_LOCK);
1762 	hba->flag |= FC_OFFLINE_MODE;
1763 	hba->flag &= ~FC_OFFLINING_MODE;
1764 	emlxs_diag_state = DDI_OFFDI;
1765 	mutex_exit(&EMLXS_PORT_LOCK);
1766 
1767 	rval = 0;
1768 
1769 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1770 
1771 done:
1772 
1773 	return (rval);
1774 
1775 } /* emlxs_offline() */
1776 
1777 
1778 
1779 extern int
1780 emlxs_power_down(emlxs_hba_t *hba)
1781 {
1782 #ifdef FMA_SUPPORT
1783 	emlxs_port_t *port = &PPORT;
1784 #endif  /* FMA_SUPPORT */
1785 	int32_t rval = 0;
1786 	uint32_t *ptr;
1787 	uint32_t i;
1788 
1789 	if ((rval = emlxs_offline(hba))) {
1790 		return (rval);
1791 	}
1792 	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
1793 
1794 	/* Save pci config space */
1795 	ptr = (uint32_t *)hba->pm_config;
1796 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1797 		*ptr =
1798 		    ddi_get32(hba->pci_acc_handle,
1799 		    (uint32_t *)(hba->pci_addr + i));
1800 	}
1801 
1802 	/* Put chip in D3 state */
1803 	(void) ddi_put8(hba->pci_acc_handle,
1804 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1805 	    (uint8_t)PCI_PM_D3_STATE);
1806 
1807 #ifdef FMA_SUPPORT
1808 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1809 	    != DDI_FM_OK) {
1810 		EMLXS_MSGF(EMLXS_CONTEXT,
1811 		    &emlxs_invalid_access_handle_msg, NULL);
1812 		return (1);
1813 	}
1814 #endif  /* FMA_SUPPORT */
1815 
1816 	return (0);
1817 
1818 } /* End emlxs_power_down */
1819 
1820 
1821 extern int
1822 emlxs_power_up(emlxs_hba_t *hba)
1823 {
1824 #ifdef FMA_SUPPORT
1825 	emlxs_port_t *port = &PPORT;
1826 #endif  /* FMA_SUPPORT */
1827 	int32_t rval = 0;
1828 	uint32_t *ptr;
1829 	uint32_t i;
1830 
1831 
1832 	/* Take chip out of D3 state */
1833 	(void) ddi_put8(hba->pci_acc_handle,
1834 	    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1835 	    (uint8_t)PCI_PM_D0_STATE);
1836 
1837 	/* Must have at least 10 ms delay here */
1838 	DELAYMS(100);
1839 
1840 	/* Restore pci config space */
1841 	ptr = (uint32_t *)hba->pm_config;
1842 	for (i = 0; i < PCI_CONFIG_SIZE; i += 4, ptr++) {
1843 		(void) ddi_put32(hba->pci_acc_handle,
1844 		    (uint32_t *)(hba->pci_addr + i), *ptr);
1845 	}
1846 
1847 #ifdef FMA_SUPPORT
1848 	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
1849 	    != DDI_FM_OK) {
1850 		EMLXS_MSGF(EMLXS_CONTEXT,
1851 		    &emlxs_invalid_access_handle_msg, NULL);
1852 		return (1);
1853 	}
1854 #endif  /* FMA_SUPPORT */
1855 
1856 	/* Bring adapter online */
1857 	if ((rval = emlxs_online(hba))) {
1858 		(void) ddi_put8(hba->pci_acc_handle,
1859 		    (uint8_t *)(hba->pci_addr + PCI_PM_CONTROL_REGISTER),
1860 		    (uint8_t)PCI_PM_D3_STATE);
1861 
1862 		return (rval);
1863 	}
1864 
1865 	return (rval);
1866 
1867 } /* End emlxs_power_up */
1868 
1869 
1870 /*
1871  *
1872  * NAME:     emlxs_ffcleanup
1873  *
1874  * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
1875  *
1876  * EXECUTION ENVIRONMENT: process only
1877  *
1878  * CALLED FROM: CFG_TERM
1879  *
1880  * INPUT: hba       - pointer to the dev_ctl area.
1881  *
1882  * RETURNS: none
1883  */
1884 extern void
1885 emlxs_ffcleanup(emlxs_hba_t *hba)
1886 {
1887 	emlxs_port_t *port = &PPORT;
1888 	uint32_t i;
1889 
1890 	/* Disable all but the mailbox interrupt */
1891 	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
1892 
1893 	/* Make sure all port nodes are destroyed */
1894 	for (i = 0; i < MAX_VPORTS; i++) {
1895 		port = &VPORT(i);
1896 
1897 		if (port->node_count) {
1898 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1899 				(void) emlxs_sli4_unreg_all_rpi_by_port(port);
1900 			} else {
1901 				(void) emlxs_mb_unreg_rpi(port, 0xffff, 0, 0,
1902 				    0);
1903 			}
1904 		}
1905 	}
1906 
1907 	/* Clear all interrupt enable conditions */
1908 	EMLXS_SLI_DISABLE_INTR(hba, 0);
1909 
1910 	return;
1911 
1912 } /* emlxs_ffcleanup() */
1913 
1914 
1915 extern uint16_t
1916 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
1917 {
1918 	emlxs_hba_t *hba;
1919 	emlxs_port_t *port;
1920 	uint16_t iotag;
1921 	uint32_t i;
1922 
1923 	hba = cp->hba;
1924 
1925 	mutex_enter(&EMLXS_FCTAB_LOCK);
1926 
1927 	if (sbp->iotag != 0) {
1928 		port = &PPORT;
1929 
1930 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1931 		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
1932 		    sbp->channel, sbp->iotag, sbp);
1933 	}
1934 
1935 	iotag = 0;
1936 	for (i = 0; i < hba->max_iotag; i++) {
1937 		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
1938 			hba->fc_iotag = 1;
1939 		}
1940 		iotag = hba->fc_iotag++;
1941 
1942 		if (hba->fc_table[iotag] == 0 ||
1943 		    hba->fc_table[iotag] == STALE_PACKET) {
1944 			hba->io_count++;
1945 			hba->fc_table[iotag] = sbp;
1946 
1947 			sbp->iotag = iotag;
1948 			sbp->channel = cp;
1949 
1950 			break;
1951 		}
1952 		iotag = 0;
1953 	}
1954 
1955 	mutex_exit(&EMLXS_FCTAB_LOCK);
1956 
1957 	/*
1958 	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
1959 	 *    "emlxs_register_pkt: channel=%d iotag=%d sbp=%p",
1960 	 *    cp->channelno, iotag, sbp);
1961 	 */
1962 
1963 	return (iotag);
1964 
1965 } /* emlxs_register_pkt() */
1966 
1967 
1968 
1969 extern emlxs_buf_t *
1970 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
1971 {
1972 	emlxs_hba_t *hba;
1973 	emlxs_buf_t *sbp;
1974 
1975 	sbp = NULL;
1976 	hba = cp->hba;
1977 
1978 	/* Check the iotag range */
1979 	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
1980 		return (NULL);
1981 	}
1982 
1983 	/* Remove the sbp from the table */
1984 	mutex_enter(&EMLXS_FCTAB_LOCK);
1985 	sbp = hba->fc_table[iotag];
1986 
1987 	if (!sbp || (sbp == STALE_PACKET)) {
1988 		mutex_exit(&EMLXS_FCTAB_LOCK);
1989 		return (sbp);
1990 	}
1991 
1992 	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
1993 	hba->io_count--;
1994 	sbp->iotag = 0;
1995 
1996 	mutex_exit(&EMLXS_FCTAB_LOCK);
1997 
1998 
1999 	/* Clean up the sbp */
2000 	mutex_enter(&sbp->mtx);
2001 
2002 	if (sbp->pkt_flags & PACKET_IN_TXQ) {
2003 		sbp->pkt_flags &= ~PACKET_IN_TXQ;
2004 		hba->channel_tx_count--;
2005 	}
2006 
2007 	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2008 		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2009 	}
2010 
2011 	if (sbp->bmp) {
2012 		(void) emlxs_mem_put(hba, MEM_BPL, (uint8_t *)sbp->bmp);
2013 		sbp->bmp = 0;
2014 	}
2015 
2016 	mutex_exit(&sbp->mtx);
2017 
2018 	return (sbp);
2019 
2020 } /* emlxs_unregister_pkt() */
2021 
2022 
2023 
2024 /* Flush all IO's to all nodes for a given IO Channel */
2025 extern uint32_t
2026 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2027 {
2028 	emlxs_port_t *port = &PPORT;
2029 	emlxs_buf_t *sbp;
2030 	IOCBQ *iocbq;
2031 	IOCBQ *next;
2032 	IOCB *iocb;
2033 	uint32_t channelno;
2034 	Q abort;
2035 	NODELIST *ndlp;
2036 	IOCB *icmd;
2037 	MATCHMAP *mp;
2038 	uint32_t i;
2039 	uint8_t flag[MAX_CHANNEL];
2040 
2041 	channelno = cp->channelno;
2042 	bzero((void *)&abort, sizeof (Q));
2043 	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2044 
2045 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2046 
2047 	/* While a node needs servicing */
2048 	while (cp->nodeq.q_first) {
2049 		ndlp = (NODELIST *) cp->nodeq.q_first;
2050 
2051 		/* Check if priority queue is not empty */
2052 		if (ndlp->nlp_ptx[channelno].q_first) {
2053 			/* Transfer all iocb's to local queue */
2054 			if (abort.q_first == 0) {
2055 				abort.q_first =
2056 				    ndlp->nlp_ptx[channelno].q_first;
2057 			} else {
2058 				((IOCBQ *)abort.q_last)->next =
2059 				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2060 			}
2061 			flag[channelno] = 1;
2062 
2063 			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2064 			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2065 		}
2066 
2067 		/* Check if tx queue is not empty */
2068 		if (ndlp->nlp_tx[channelno].q_first) {
2069 			/* Transfer all iocb's to local queue */
2070 			if (abort.q_first == 0) {
2071 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2072 			} else {
2073 				((IOCBQ *)abort.q_last)->next =
2074 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2075 			}
2076 
2077 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2078 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2079 		}
2080 
2081 		/* Clear the queue pointers */
2082 		ndlp->nlp_ptx[channelno].q_first = NULL;
2083 		ndlp->nlp_ptx[channelno].q_last = NULL;
2084 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2085 
2086 		ndlp->nlp_tx[channelno].q_first = NULL;
2087 		ndlp->nlp_tx[channelno].q_last = NULL;
2088 		ndlp->nlp_tx[channelno].q_cnt = 0;
2089 
2090 		/* Remove node from service queue */
2091 
2092 		/* If this is the last node on list */
2093 		if (cp->nodeq.q_last == (void *)ndlp) {
2094 			cp->nodeq.q_last = NULL;
2095 			cp->nodeq.q_first = NULL;
2096 			cp->nodeq.q_cnt = 0;
2097 		} else {
2098 			/* Remove node from head */
2099 			cp->nodeq.q_first = ndlp->nlp_next[channelno];
2100 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2101 			    cp->nodeq.q_first;
2102 			cp->nodeq.q_cnt--;
2103 		}
2104 
2105 		/* Clear node */
2106 		ndlp->nlp_next[channelno] = NULL;
2107 	}
2108 
2109 	/* First cleanup the iocb's while still holding the lock */
2110 	iocbq = (IOCBQ *) abort.q_first;
2111 	while (iocbq) {
2112 		/* Free the IoTag and the bmp */
2113 		iocb = &iocbq->iocb;
2114 
2115 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2116 			sbp = iocbq->sbp;
2117 			if (sbp) {
2118 				hba->fc_table[sbp->iotag] = NULL;
2119 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2120 			}
2121 		} else {
2122 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2123 			    iocb->ULPIOTAG, 0);
2124 		}
2125 
2126 		if (sbp && (sbp != STALE_PACKET)) {
2127 			mutex_enter(&sbp->mtx);
2128 
2129 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2130 			/*
2131 			 * If the fpkt is already set, then we will leave it
2132 			 * alone. This ensures that this pkt is only accounted
2133 			 * for on one fpkt->flush_count
2134 			 */
2135 			if (!sbp->fpkt && fpkt) {
2136 				mutex_enter(&fpkt->mtx);
2137 				sbp->fpkt = fpkt;
2138 				fpkt->flush_count++;
2139 				mutex_exit(&fpkt->mtx);
2140 			}
2141 
2142 			mutex_exit(&sbp->mtx);
2143 		}
2144 
2145 		iocbq = (IOCBQ *)iocbq->next;
2146 	}	/* end of while */
2147 
2148 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2149 
2150 	/* Now abort the iocb's */
2151 	iocbq = (IOCBQ *)abort.q_first;
2152 	while (iocbq) {
2153 		/* Save the next iocbq for now */
2154 		next = (IOCBQ *)iocbq->next;
2155 
2156 		/* Unlink this iocbq */
2157 		iocbq->next = NULL;
2158 
2159 		/* Get the pkt */
2160 		sbp = (emlxs_buf_t *)iocbq->sbp;
2161 
2162 		if (sbp) {
2163 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2164 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2165 
2166 			if (hba->state >= FC_LINK_UP) {
2167 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2168 				    IOERR_ABORT_REQUESTED, 1);
2169 			} else {
2170 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2171 				    IOERR_LINK_DOWN, 1);
2172 			}
2173 
2174 		}
2175 		/* Free the iocb and its associated buffers */
2176 		else {
2177 			icmd = &iocbq->iocb;
2178 
2179 			/* SLI3 */
2180 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2181 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2182 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2183 				if ((hba->flag &
2184 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2185 					/* HBA is detaching or offlining */
2186 					if (icmd->ULPCOMMAND !=
2187 					    CMD_QUE_RING_LIST64_CN) {
2188 						uint8_t	*tmp;
2189 						RING *rp;
2190 
2191 						rp = &hba->sli.sli3.
2192 						    ring[channelno];
2193 						for (i = 0;
2194 						    i < icmd->ULPBDECOUNT;
2195 						    i++) {
2196 							mp = EMLXS_GET_VADDR(
2197 							    hba, rp, icmd);
2198 
2199 							tmp = (uint8_t *)mp;
2200 							if (mp) {
2201 							(void) emlxs_mem_put(
2202 							    hba, MEM_BUF, tmp);
2203 							}
2204 						}
2205 					}
2206 
2207 					(void) emlxs_mem_put(hba, MEM_IOCB,
2208 					    (uint8_t *)iocbq);
2209 				} else {
2210 					/* repost the unsolicited buffer */
2211 					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2212 					    iocbq);
2213 				}
2214 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2215 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2216 
2217 				emlxs_tx_put(iocbq, 1);
2218 			}
2219 		}
2220 
2221 		iocbq = next;
2222 
2223 	}	/* end of while */
2224 
2225 	/* Now trigger channel service */
2226 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2227 		if (!flag[channelno]) {
2228 			continue;
2229 		}
2230 
2231 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2232 	}
2233 
2234 	return (abort.q_cnt);
2235 
2236 } /* emlxs_tx_channel_flush() */
2237 
2238 
2239 /* Flush all IO's on all or a given ring for a given node */
2240 extern uint32_t
2241 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2242     uint32_t shutdown, emlxs_buf_t *fpkt)
2243 {
2244 	emlxs_hba_t *hba = HBA;
2245 	emlxs_buf_t *sbp;
2246 	uint32_t channelno;
2247 	CHANNEL *cp;
2248 	IOCB *icmd;
2249 	IOCBQ *iocbq;
2250 	NODELIST *prev;
2251 	IOCBQ *next;
2252 	IOCB *iocb;
2253 	Q abort;
2254 	uint32_t i;
2255 	MATCHMAP *mp;
2256 	uint8_t flag[MAX_CHANNEL];
2257 
2258 	bzero((void *)&abort, sizeof (Q));
2259 
2260 	/* Flush all I/O's on tx queue to this target */
2261 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2262 
2263 	if (!ndlp->nlp_base && shutdown) {
2264 		ndlp->nlp_active = 0;
2265 	}
2266 
2267 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2268 		cp = &hba->chan[channelno];
2269 
2270 		if (chan && cp != chan) {
2271 			continue;
2272 		}
2273 
2274 		if (!ndlp->nlp_base || shutdown) {
2275 			/* Check if priority queue is not empty */
2276 			if (ndlp->nlp_ptx[channelno].q_first) {
2277 				/* Transfer all iocb's to local queue */
2278 				if (abort.q_first == 0) {
2279 					abort.q_first =
2280 					    ndlp->nlp_ptx[channelno].q_first;
2281 				} else {
2282 					((IOCBQ *)(abort.q_last))->next =
2283 					    (IOCBQ *)ndlp->nlp_ptx[channelno].
2284 					    q_first;
2285 				}
2286 
2287 				flag[channelno] = 1;
2288 
2289 				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2290 				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2291 			}
2292 		}
2293 
2294 		/* Check if tx queue is not empty */
2295 		if (ndlp->nlp_tx[channelno].q_first) {
2296 
2297 			/* Transfer all iocb's to local queue */
2298 			if (abort.q_first == 0) {
2299 				abort.q_first = ndlp->nlp_tx[channelno].q_first;
2300 			} else {
2301 				((IOCBQ *)abort.q_last)->next =
2302 				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2303 			}
2304 
2305 			abort.q_last = ndlp->nlp_tx[channelno].q_last;
2306 			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2307 		}
2308 
2309 		/* Clear the queue pointers */
2310 		ndlp->nlp_ptx[channelno].q_first = NULL;
2311 		ndlp->nlp_ptx[channelno].q_last = NULL;
2312 		ndlp->nlp_ptx[channelno].q_cnt = 0;
2313 
2314 		ndlp->nlp_tx[channelno].q_first = NULL;
2315 		ndlp->nlp_tx[channelno].q_last = NULL;
2316 		ndlp->nlp_tx[channelno].q_cnt = 0;
2317 
2318 		/* If this node was on the channel queue, remove it */
2319 		if (ndlp->nlp_next[channelno]) {
2320 			/* If this is the only node on list */
2321 			if (cp->nodeq.q_first == (void *)ndlp &&
2322 			    cp->nodeq.q_last == (void *)ndlp) {
2323 				cp->nodeq.q_last = NULL;
2324 				cp->nodeq.q_first = NULL;
2325 				cp->nodeq.q_cnt = 0;
2326 			} else if (cp->nodeq.q_first == (void *)ndlp) {
2327 				cp->nodeq.q_first = ndlp->nlp_next[channelno];
2328 				((NODELIST *) cp->nodeq.q_last)->
2329 				    nlp_next[channelno] = cp->nodeq.q_first;
2330 				cp->nodeq.q_cnt--;
2331 			} else {
2332 				/*
2333 				 * This is a little more difficult find the
2334 				 * previous node in the circular channel queue
2335 				 */
2336 				prev = ndlp;
2337 				while (prev->nlp_next[channelno] != ndlp) {
2338 					prev = prev->nlp_next[channelno];
2339 				}
2340 
2341 				prev->nlp_next[channelno] =
2342 				    ndlp->nlp_next[channelno];
2343 
2344 				if (cp->nodeq.q_last == (void *)ndlp) {
2345 					cp->nodeq.q_last = (void *)prev;
2346 				}
2347 				cp->nodeq.q_cnt--;
2348 
2349 			}
2350 
2351 			/* Clear node */
2352 			ndlp->nlp_next[channelno] = NULL;
2353 		}
2354 
2355 	}
2356 
2357 	/* First cleanup the iocb's while still holding the lock */
2358 	iocbq = (IOCBQ *) abort.q_first;
2359 	while (iocbq) {
2360 		/* Free the IoTag and the bmp */
2361 		iocb = &iocbq->iocb;
2362 
2363 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2364 			sbp = iocbq->sbp;
2365 			if (sbp) {
2366 				hba->fc_table[sbp->iotag] = NULL;
2367 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2368 			}
2369 		} else {
2370 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2371 			    iocb->ULPIOTAG, 0);
2372 		}
2373 
2374 		if (sbp && (sbp != STALE_PACKET)) {
2375 			mutex_enter(&sbp->mtx);
2376 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2377 			/*
2378 			 * If the fpkt is already set, then we will leave it
2379 			 * alone. This ensures that this pkt is only accounted
2380 			 * for on one fpkt->flush_count
2381 			 */
2382 			if (!sbp->fpkt && fpkt) {
2383 				mutex_enter(&fpkt->mtx);
2384 				sbp->fpkt = fpkt;
2385 				fpkt->flush_count++;
2386 				mutex_exit(&fpkt->mtx);
2387 			}
2388 
2389 			mutex_exit(&sbp->mtx);
2390 		}
2391 
2392 		iocbq = (IOCBQ *) iocbq->next;
2393 
2394 	}	/* end of while */
2395 
2396 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2397 
2398 	/* Now abort the iocb's outside the locks */
2399 	iocbq = (IOCBQ *)abort.q_first;
2400 	while (iocbq) {
2401 		/* Save the next iocbq for now */
2402 		next = (IOCBQ *)iocbq->next;
2403 
2404 		/* Unlink this iocbq */
2405 		iocbq->next = NULL;
2406 
2407 		/* Get the pkt */
2408 		sbp = (emlxs_buf_t *)iocbq->sbp;
2409 
2410 		if (sbp) {
2411 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2412 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2413 
2414 			if (hba->state >= FC_LINK_UP) {
2415 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2416 				    IOERR_ABORT_REQUESTED, 1);
2417 			} else {
2418 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2419 				    IOERR_LINK_DOWN, 1);
2420 			}
2421 
2422 		}
2423 		/* Free the iocb and its associated buffers */
2424 		else {
2425 			/* CMD_CLOSE_XRI_CN should also free the memory */
2426 			icmd = &iocbq->iocb;
2427 
2428 			/* SLI3 */
2429 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2430 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2431 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2432 				if ((hba->flag &
2433 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2434 					/* HBA is detaching or offlining */
2435 					if (icmd->ULPCOMMAND !=
2436 					    CMD_QUE_RING_LIST64_CN) {
2437 						uint8_t	*tmp;
2438 						RING *rp;
2439 						int ch;
2440 
2441 						ch = ((CHANNEL *)
2442 						    iocbq->channel)->channelno;
2443 						rp = &hba->sli.sli3.ring[ch];
2444 						for (i = 0;
2445 						    i < icmd->ULPBDECOUNT;
2446 						    i++) {
2447 							mp = EMLXS_GET_VADDR(
2448 							    hba, rp, icmd);
2449 
2450 							tmp = (uint8_t *)mp;
2451 							if (mp) {
2452 							(void) emlxs_mem_put(
2453 							    hba, MEM_BUF, tmp);
2454 							}
2455 						}
2456 					}
2457 
2458 					(void) emlxs_mem_put(hba, MEM_IOCB,
2459 					    (uint8_t *)iocbq);
2460 				} else {
2461 					/* repost the unsolicited buffer */
2462 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2463 					    (CHANNEL *)iocbq->channel, iocbq);
2464 				}
2465 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2466 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2467 				/*
2468 				 * Resend the abort iocbq if any
2469 				 */
2470 				emlxs_tx_put(iocbq, 1);
2471 			}
2472 		}
2473 
2474 		iocbq = next;
2475 
2476 	}	/* end of while */
2477 
2478 	/* Now trigger channel service */
2479 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2480 		if (!flag[channelno]) {
2481 			continue;
2482 		}
2483 
2484 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2485 	}
2486 
2487 	return (abort.q_cnt);
2488 
2489 } /* emlxs_tx_node_flush() */
2490 
2491 
2492 /* Check for IO's on all or a given ring for a given node */
2493 extern uint32_t
2494 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2495 {
2496 	emlxs_hba_t *hba = HBA;
2497 	uint32_t channelno;
2498 	CHANNEL *cp;
2499 	uint32_t count;
2500 
2501 	count = 0;
2502 
2503 	/* Flush all I/O's on tx queue to this target */
2504 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2505 
2506 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2507 		cp = &hba->chan[channelno];
2508 
2509 		if (chan && cp != chan) {
2510 			continue;
2511 		}
2512 
2513 		/* Check if priority queue is not empty */
2514 		if (ndlp->nlp_ptx[channelno].q_first) {
2515 			count += ndlp->nlp_ptx[channelno].q_cnt;
2516 		}
2517 
2518 		/* Check if tx queue is not empty */
2519 		if (ndlp->nlp_tx[channelno].q_first) {
2520 			count += ndlp->nlp_tx[channelno].q_cnt;
2521 		}
2522 
2523 	}
2524 
2525 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2526 
2527 	return (count);
2528 
2529 } /* emlxs_tx_node_check() */
2530 
2531 
2532 
2533 /* Flush all IO's on the any ring for a given node's lun */
2534 extern uint32_t
2535 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2536     emlxs_buf_t *fpkt)
2537 {
2538 	emlxs_hba_t *hba = HBA;
2539 	emlxs_buf_t *sbp;
2540 	uint32_t channelno;
2541 	IOCBQ *iocbq;
2542 	IOCBQ *prev;
2543 	IOCBQ *next;
2544 	IOCB *iocb;
2545 	IOCB *icmd;
2546 	Q abort;
2547 	uint32_t i;
2548 	MATCHMAP *mp;
2549 	CHANNEL *cp;
2550 	CHANNEL *channel;
2551 	uint8_t flag[MAX_CHANNEL];
2552 
2553 	bzero((void *)&abort, sizeof (Q));
2554 
2555 	/* Flush I/O's on txQ to this target's lun */
2556 	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2557 
2558 	channel = &hba->chan[hba->channel_fcp];
2559 
2560 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2561 		cp = &hba->chan[channelno];
2562 
2563 		if (channel && cp != channel) {
2564 			continue;
2565 		}
2566 
2567 		/* Scan the priority queue first */
2568 		prev = NULL;
2569 		iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2570 
2571 		while (iocbq) {
2572 			next = (IOCBQ *)iocbq->next;
2573 			iocb = &iocbq->iocb;
2574 			sbp = (emlxs_buf_t *)iocbq->sbp;
2575 
2576 			/* Check if this IO is for our lun */
2577 			if (sbp && (sbp->lun == lun)) {
2578 				/* Remove iocb from the node's ptx queue */
2579 				if (next == 0) {
2580 					ndlp->nlp_ptx[channelno].q_last =
2581 					    (uint8_t *)prev;
2582 				}
2583 
2584 				if (prev == 0) {
2585 					ndlp->nlp_ptx[channelno].q_first =
2586 					    (uint8_t *)next;
2587 				} else {
2588 					prev->next = next;
2589 				}
2590 
2591 				iocbq->next = NULL;
2592 				ndlp->nlp_ptx[channelno].q_cnt--;
2593 
2594 				/*
2595 				 * Add this iocb to our local abort Q
2596 				 */
2597 				if (abort.q_first) {
2598 					((IOCBQ *)abort.q_last)->next = iocbq;
2599 					abort.q_last = (uint8_t *)iocbq;
2600 					abort.q_cnt++;
2601 				} else {
2602 					abort.q_first = (uint8_t *)iocbq;
2603 					abort.q_last = (uint8_t *)iocbq;
2604 					abort.q_cnt = 1;
2605 				}
2606 				iocbq->next = NULL;
2607 				flag[channelno] = 1;
2608 
2609 			} else {
2610 				prev = iocbq;
2611 			}
2612 
2613 			iocbq = next;
2614 
2615 		}	/* while (iocbq) */
2616 
2617 
2618 		/* Scan the regular queue */
2619 		prev = NULL;
2620 		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2621 
2622 		while (iocbq) {
2623 			next = (IOCBQ *)iocbq->next;
2624 			iocb = &iocbq->iocb;
2625 			sbp = (emlxs_buf_t *)iocbq->sbp;
2626 
2627 			/* Check if this IO is for our lun */
2628 			if (sbp && (sbp->lun == lun)) {
2629 				/* Remove iocb from the node's tx queue */
2630 				if (next == 0) {
2631 					ndlp->nlp_tx[channelno].q_last =
2632 					    (uint8_t *)prev;
2633 				}
2634 
2635 				if (prev == 0) {
2636 					ndlp->nlp_tx[channelno].q_first =
2637 					    (uint8_t *)next;
2638 				} else {
2639 					prev->next = next;
2640 				}
2641 
2642 				iocbq->next = NULL;
2643 				ndlp->nlp_tx[channelno].q_cnt--;
2644 
2645 				/*
2646 				 * Add this iocb to our local abort Q
2647 				 */
2648 				if (abort.q_first) {
2649 					((IOCBQ *) abort.q_last)->next = iocbq;
2650 					abort.q_last = (uint8_t *)iocbq;
2651 					abort.q_cnt++;
2652 				} else {
2653 					abort.q_first = (uint8_t *)iocbq;
2654 					abort.q_last = (uint8_t *)iocbq;
2655 					abort.q_cnt = 1;
2656 				}
2657 				iocbq->next = NULL;
2658 			} else {
2659 				prev = iocbq;
2660 			}
2661 
2662 			iocbq = next;
2663 
2664 		}	/* while (iocbq) */
2665 	}	/* for loop */
2666 
2667 	/* First cleanup the iocb's while still holding the lock */
2668 	iocbq = (IOCBQ *)abort.q_first;
2669 	while (iocbq) {
2670 		/* Free the IoTag and the bmp */
2671 		iocb = &iocbq->iocb;
2672 
2673 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2674 			sbp = iocbq->sbp;
2675 			if (sbp) {
2676 				hba->fc_table[sbp->iotag] = NULL;
2677 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2678 			}
2679 		} else {
2680 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2681 			    iocb->ULPIOTAG, 0);
2682 		}
2683 
2684 		if (sbp && (sbp != STALE_PACKET)) {
2685 			mutex_enter(&sbp->mtx);
2686 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2687 			/*
2688 			 * If the fpkt is already set, then we will leave it
2689 			 * alone. This ensures that this pkt is only accounted
2690 			 * for on one fpkt->flush_count
2691 			 */
2692 			if (!sbp->fpkt && fpkt) {
2693 				mutex_enter(&fpkt->mtx);
2694 				sbp->fpkt = fpkt;
2695 				fpkt->flush_count++;
2696 				mutex_exit(&fpkt->mtx);
2697 			}
2698 
2699 			mutex_exit(&sbp->mtx);
2700 		}
2701 
2702 		iocbq = (IOCBQ *) iocbq->next;
2703 
2704 	}	/* end of while */
2705 
2706 	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2707 
2708 	/* Now abort the iocb's outside the locks */
2709 	iocbq = (IOCBQ *)abort.q_first;
2710 	while (iocbq) {
2711 		/* Save the next iocbq for now */
2712 		next = (IOCBQ *)iocbq->next;
2713 
2714 		/* Unlink this iocbq */
2715 		iocbq->next = NULL;
2716 
2717 		/* Get the pkt */
2718 		sbp = (emlxs_buf_t *)iocbq->sbp;
2719 
2720 		if (sbp) {
2721 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2722 			    "tx: sbp=%p node=%p", sbp, sbp->node);
2723 
2724 			if (hba->state >= FC_LINK_UP) {
2725 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2726 				    IOERR_ABORT_REQUESTED, 1);
2727 			} else {
2728 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2729 				    IOERR_LINK_DOWN, 1);
2730 			}
2731 		}
2732 
2733 		/* Free the iocb and its associated buffers */
2734 		else {
2735 			/* Should never happen! */
2736 			icmd = &iocbq->iocb;
2737 
2738 			/* SLI3 */
2739 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2740 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2741 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2742 				if ((hba->flag &
2743 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2744 					/* HBA is detaching or offlining */
2745 					if (icmd->ULPCOMMAND !=
2746 					    CMD_QUE_RING_LIST64_CN) {
2747 						uint8_t	*tmp;
2748 						RING *rp;
2749 						int ch;
2750 
2751 						ch = ((CHANNEL *)
2752 						    iocbq->channel)->channelno;
2753 						rp = &hba->sli.sli3.ring[ch];
2754 						for (i = 0;
2755 						    i < icmd->ULPBDECOUNT;
2756 						    i++) {
2757 							mp = EMLXS_GET_VADDR(
2758 							    hba, rp, icmd);
2759 
2760 							tmp = (uint8_t *)mp;
2761 							if (mp) {
2762 							(void) emlxs_mem_put(
2763 							    hba, MEM_BUF, tmp);
2764 							}
2765 						}
2766 					}
2767 
2768 					(void) emlxs_mem_put(hba, MEM_IOCB,
2769 					    (uint8_t *)iocbq);
2770 				} else {
2771 					/* repost the unsolicited buffer */
2772 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2773 					    (CHANNEL *)iocbq->channel, iocbq);
2774 				}
2775 			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2776 			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2777 				/*
2778 				 * Resend the abort iocbq if any
2779 				 */
2780 				emlxs_tx_put(iocbq, 1);
2781 			}
2782 		}
2783 
2784 		iocbq = next;
2785 
2786 	}	/* end of while */
2787 
2788 	/* Now trigger channel service */
2789 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2790 		if (!flag[channelno]) {
2791 			continue;
2792 		}
2793 
2794 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2795 	}
2796 
2797 	return (abort.q_cnt);
2798 
2799 } /* emlxs_tx_lun_flush() */
2800 
2801 
2802 extern void
2803 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
2804 {
2805 	emlxs_hba_t *hba;
2806 	emlxs_port_t *port;
2807 	uint32_t channelno;
2808 	NODELIST *nlp;
2809 	CHANNEL *cp;
2810 	emlxs_buf_t *sbp;
2811 
2812 	port = (emlxs_port_t *)iocbq->port;
2813 	hba = HBA;
2814 	cp = (CHANNEL *)iocbq->channel;
2815 	nlp = (NODELIST *)iocbq->node;
2816 	channelno = cp->channelno;
2817 	sbp = (emlxs_buf_t *)iocbq->sbp;
2818 
2819 	/* under what cases, nlp is NULL */
2820 	if (nlp == NULL) {
2821 		/* Set node to base node by default */
2822 		nlp = &port->node_base;
2823 
2824 		iocbq->node = (void *)nlp;
2825 
2826 		if (sbp) {
2827 			sbp->node = (void *)nlp;
2828 		}
2829 	}
2830 
2831 	if (lock) {
2832 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2833 	}
2834 
2835 	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
2836 		if (sbp) {
2837 			mutex_enter(&sbp->mtx);
2838 			sbp->pkt_flags |= PACKET_IN_FLUSH;
2839 			mutex_exit(&sbp->mtx);
2840 
2841 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2842 				hba->fc_table[sbp->iotag] = NULL;
2843 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
2844 			} else {
2845 				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
2846 			}
2847 
2848 			if (lock) {
2849 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2850 			}
2851 
2852 			if (hba->state >= FC_LINK_UP) {
2853 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2854 				    IOERR_ABORT_REQUESTED, 1);
2855 			} else {
2856 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2857 				    IOERR_LINK_DOWN, 1);
2858 			}
2859 			return;
2860 		} else {
2861 			if (lock) {
2862 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2863 			}
2864 
2865 			(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
2866 		}
2867 
2868 		return;
2869 	}
2870 
2871 	if (sbp) {
2872 
2873 		mutex_enter(&sbp->mtx);
2874 
2875 		if (sbp->pkt_flags &
2876 		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
2877 			mutex_exit(&sbp->mtx);
2878 			if (lock) {
2879 				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2880 			}
2881 			return;
2882 		}
2883 
2884 		sbp->pkt_flags |= PACKET_IN_TXQ;
2885 		hba->channel_tx_count++;
2886 
2887 		mutex_exit(&sbp->mtx);
2888 	}
2889 
2890 
2891 	/* Check iocbq priority */
2892 	/* Some IOCB has the high priority like reset/close xri etc */
2893 	if (iocbq->flag & IOCB_PRIORITY) {
2894 		/* Add the iocb to the bottom of the node's ptx queue */
2895 		if (nlp->nlp_ptx[channelno].q_first) {
2896 			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
2897 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2898 			nlp->nlp_ptx[channelno].q_cnt++;
2899 		} else {
2900 			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
2901 			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
2902 			nlp->nlp_ptx[channelno].q_cnt = 1;
2903 		}
2904 
2905 		iocbq->next = NULL;
2906 	} else {	/* Normal priority */
2907 
2908 
2909 		/* Add the iocb to the bottom of the node's tx queue */
2910 		if (nlp->nlp_tx[channelno].q_first) {
2911 			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
2912 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2913 			nlp->nlp_tx[channelno].q_cnt++;
2914 		} else {
2915 			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
2916 			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
2917 			nlp->nlp_tx[channelno].q_cnt = 1;
2918 		}
2919 
2920 		iocbq->next = NULL;
2921 	}
2922 
2923 
2924 	/*
2925 	 * Check if the node is not already on channel queue and
2926 	 * (is not closed or  is a priority request)
2927 	 */
2928 	if (!nlp->nlp_next[channelno] &&
2929 	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
2930 	    (iocbq->flag & IOCB_PRIORITY))) {
2931 		/* If so, then add it to the channel queue */
2932 		if (cp->nodeq.q_first) {
2933 			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2934 			    (uint8_t *)nlp;
2935 			nlp->nlp_next[channelno] = cp->nodeq.q_first;
2936 
2937 			/*
2938 			 * If this is not the base node then add it
2939 			 * to the tail
2940 			 */
2941 			if (!nlp->nlp_base) {
2942 				cp->nodeq.q_last = (uint8_t *)nlp;
2943 			} else {	/* Otherwise, add it to the head */
2944 
2945 				/* The command node always gets priority */
2946 				cp->nodeq.q_first = (uint8_t *)nlp;
2947 			}
2948 
2949 			cp->nodeq.q_cnt++;
2950 		} else {
2951 			cp->nodeq.q_first = (uint8_t *)nlp;
2952 			cp->nodeq.q_last = (uint8_t *)nlp;
2953 			nlp->nlp_next[channelno] = nlp;
2954 			cp->nodeq.q_cnt = 1;
2955 		}
2956 	}
2957 
2958 	HBASTATS.IocbTxPut[channelno]++;
2959 
2960 	/* Adjust the channel timeout timer */
2961 	cp->timeout = hba->timer_tics + 5;
2962 
2963 	if (lock) {
2964 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2965 	}
2966 
2967 	return;
2968 
2969 } /* emlxs_tx_put() */
2970 
2971 
2972 extern IOCBQ *
2973 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
2974 {
2975 	emlxs_hba_t *hba;
2976 	uint32_t channelno;
2977 	IOCBQ *iocbq;
2978 	NODELIST *nlp;
2979 	emlxs_buf_t *sbp;
2980 
2981 	hba = cp->hba;
2982 	channelno = cp->channelno;
2983 
2984 	if (lock) {
2985 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2986 	}
2987 
2988 begin:
2989 
2990 	iocbq = NULL;
2991 
2992 	/* Check if a node needs servicing */
2993 	if (cp->nodeq.q_first) {
2994 		nlp = (NODELIST *)cp->nodeq.q_first;
2995 
2996 		/* Get next iocb from node's priority queue */
2997 
2998 		if (nlp->nlp_ptx[channelno].q_first) {
2999 			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3000 
3001 			/* Check if this is last entry */
3002 			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3003 				nlp->nlp_ptx[channelno].q_first = NULL;
3004 				nlp->nlp_ptx[channelno].q_last = NULL;
3005 				nlp->nlp_ptx[channelno].q_cnt = 0;
3006 			} else {
3007 				/* Remove iocb from head */
3008 				nlp->nlp_ptx[channelno].q_first =
3009 				    (void *)iocbq->next;
3010 				nlp->nlp_ptx[channelno].q_cnt--;
3011 			}
3012 
3013 			iocbq->next = NULL;
3014 		}
3015 
3016 		/* Get next iocb from node tx queue if node not closed */
3017 		else if (nlp->nlp_tx[channelno].q_first &&
3018 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3019 			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3020 
3021 			/* Check if this is last entry */
3022 			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3023 				nlp->nlp_tx[channelno].q_first = NULL;
3024 				nlp->nlp_tx[channelno].q_last = NULL;
3025 				nlp->nlp_tx[channelno].q_cnt = 0;
3026 			} else {
3027 				/* Remove iocb from head */
3028 				nlp->nlp_tx[channelno].q_first =
3029 				    (void *)iocbq->next;
3030 				nlp->nlp_tx[channelno].q_cnt--;
3031 			}
3032 
3033 			iocbq->next = NULL;
3034 		}
3035 
3036 		/* Now deal with node itself */
3037 
3038 		/* Check if node still needs servicing */
3039 		if ((nlp->nlp_ptx[channelno].q_first) ||
3040 		    (nlp->nlp_tx[channelno].q_first &&
3041 		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3042 
3043 			/*
3044 			 * If this is the base node, then don't shift the
3045 			 * pointers. We want to drain the base node before
3046 			 * moving on
3047 			 */
3048 			if (!nlp->nlp_base) {
3049 				/*
3050 				 * Just shift channel queue pointers to next
3051 				 * node
3052 				 */
3053 				cp->nodeq.q_last = (void *)nlp;
3054 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3055 			}
3056 		} else {
3057 			/* Remove node from channel queue */
3058 
3059 			/* If this is the last node on list */
3060 			if (cp->nodeq.q_last == (void *)nlp) {
3061 				cp->nodeq.q_last = NULL;
3062 				cp->nodeq.q_first = NULL;
3063 				cp->nodeq.q_cnt = 0;
3064 			} else {
3065 				/* Remove node from head */
3066 				cp->nodeq.q_first = nlp->nlp_next[channelno];
3067 				((NODELIST *)cp->nodeq.q_last)->
3068 				    nlp_next[channelno] = cp->nodeq.q_first;
3069 				cp->nodeq.q_cnt--;
3070 
3071 			}
3072 
3073 			/* Clear node */
3074 			nlp->nlp_next[channelno] = NULL;
3075 		}
3076 
3077 		/*
3078 		 * If no iocbq was found on this node, then it will have
3079 		 * been removed. So try again.
3080 		 */
3081 		if (!iocbq) {
3082 			goto begin;
3083 		}
3084 
3085 		sbp = (emlxs_buf_t *)iocbq->sbp;
3086 
3087 		if (sbp) {
3088 			/*
3089 			 * Check flags before we enter mutex in case this
3090 			 * has been flushed and destroyed
3091 			 */
3092 			if ((sbp->pkt_flags &
3093 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3094 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3095 				goto begin;
3096 			}
3097 
3098 			mutex_enter(&sbp->mtx);
3099 
3100 			if ((sbp->pkt_flags &
3101 			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3102 			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3103 				mutex_exit(&sbp->mtx);
3104 				goto begin;
3105 			}
3106 
3107 			sbp->pkt_flags &= ~PACKET_IN_TXQ;
3108 			hba->channel_tx_count--;
3109 
3110 			mutex_exit(&sbp->mtx);
3111 		}
3112 	}
3113 
3114 	if (iocbq) {
3115 		HBASTATS.IocbTxGet[channelno]++;
3116 	}
3117 
3118 	/* Adjust the ring timeout timer */
3119 	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3120 
3121 	if (lock) {
3122 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3123 	}
3124 
3125 	return (iocbq);
3126 
3127 } /* emlxs_tx_get() */
3128 
3129 
3130 /*
3131  * Remove all cmd from from_rp's txq to to_rp's txq for ndlp.
3132  * The old IoTag has to be released, the new one has to be
3133  * allocated.  Others no change
3134  * TX_CHANNEL lock is held
3135  */
3136 extern void
3137 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3138     uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3139 {
3140 	emlxs_hba_t *hba;
3141 	emlxs_port_t *port;
3142 	uint32_t fchanno, tchanno, i;
3143 
3144 	IOCBQ *iocbq;
3145 	IOCBQ *prev;
3146 	IOCBQ *next;
3147 	IOCB *iocb, *icmd;
3148 	Q tbm;		/* To Be Moved Q */
3149 	MATCHMAP *mp;
3150 
3151 	NODELIST *nlp = ndlp;
3152 	emlxs_buf_t *sbp;
3153 
3154 	NODELIST *n_prev = NULL;
3155 	NODELIST *n_next = NULL;
3156 	uint16_t count = 0;
3157 
3158 	hba = from_chan->hba;
3159 	port = &PPORT;
3160 	cmd = cmd; /* To pass lint */
3161 
3162 	fchanno = from_chan->channelno;
3163 	tchanno = to_chan->channelno;
3164 
3165 	if (lock) {
3166 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3167 	}
3168 
3169 	bzero((void *)&tbm, sizeof (Q));
3170 
3171 	/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
3172 	prev = NULL;
3173 	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3174 
3175 	while (iocbq) {
3176 		next = (IOCBQ *)iocbq->next;
3177 		/* Check if this iocb is fcp cmd */
3178 		iocb = &iocbq->iocb;
3179 
3180 		switch (iocb->ULPCOMMAND) {
3181 		/* FCP commands */
3182 		case CMD_FCP_ICMND_CR:
3183 		case CMD_FCP_ICMND_CX:
3184 		case CMD_FCP_IREAD_CR:
3185 		case CMD_FCP_IREAD_CX:
3186 		case CMD_FCP_IWRITE_CR:
3187 		case CMD_FCP_IWRITE_CX:
3188 		case CMD_FCP_ICMND64_CR:
3189 		case CMD_FCP_ICMND64_CX:
3190 		case CMD_FCP_IREAD64_CR:
3191 		case CMD_FCP_IREAD64_CX:
3192 		case CMD_FCP_IWRITE64_CR:
3193 		case CMD_FCP_IWRITE64_CX:
3194 			/* We found a fcp cmd */
3195 			break;
3196 		default:
3197 			/* this is not fcp cmd continue */
3198 			prev = iocbq;
3199 			iocbq = next;
3200 			continue;
3201 		}
3202 
3203 		/* found a fcp cmd iocb in fchanno txq, now deque it */
3204 		if (next == NULL) {
3205 			/* This is the last iocbq */
3206 			nlp->nlp_tx[fchanno].q_last =
3207 			    (uint8_t *)prev;
3208 		}
3209 
3210 		if (prev == NULL) {
3211 			/* This is the first one then remove it from head */
3212 			nlp->nlp_tx[fchanno].q_first =
3213 			    (uint8_t *)next;
3214 		} else {
3215 			prev->next = next;
3216 		}
3217 
3218 		iocbq->next = NULL;
3219 		nlp->nlp_tx[fchanno].q_cnt--;
3220 
3221 		/* Add this iocb to our local toberemovedq */
3222 		/* This way we donot hold the TX_CHANNEL lock too long */
3223 
3224 		if (tbm.q_first) {
3225 			((IOCBQ *)tbm.q_last)->next = iocbq;
3226 			tbm.q_last = (uint8_t *)iocbq;
3227 			tbm.q_cnt++;
3228 		} else {
3229 			tbm.q_first = (uint8_t *)iocbq;
3230 			tbm.q_last = (uint8_t *)iocbq;
3231 			tbm.q_cnt = 1;
3232 		}
3233 
3234 		iocbq = next;
3235 
3236 	}	/* While (iocbq) */
3237 
3238 	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3239 
3240 		/* from_chan->nodeq.q_first must be non NULL */
3241 		if (from_chan->nodeq.q_first) {
3242 
3243 			/* nodeq is not empty, now deal with the node itself */
3244 			if ((nlp->nlp_tx[fchanno].q_first)) {
3245 
3246 				if (!nlp->nlp_base) {
3247 					from_chan->nodeq.q_last =
3248 					    (void *)nlp;
3249 					from_chan->nodeq.q_first =
3250 					    nlp->nlp_next[fchanno];
3251 				}
3252 
3253 			} else {
3254 				n_prev = (NODELIST *)from_chan->nodeq.q_first;
3255 				count = from_chan->nodeq.q_cnt;
3256 
3257 				if (n_prev == nlp) {
3258 
3259 					/* If this is the only node on list */
3260 					if (from_chan->nodeq.q_last ==
3261 					    (void *)nlp) {
3262 						from_chan->nodeq.q_last =
3263 						    NULL;
3264 						from_chan->nodeq.q_first =
3265 						    NULL;
3266 						from_chan->nodeq.q_cnt = 0;
3267 					} else {
3268 						from_chan->nodeq.q_first =
3269 						    nlp->nlp_next[fchanno];
3270 						((NODELIST *)from_chan->
3271 						    nodeq.q_last)->
3272 						    nlp_next[fchanno] =
3273 						    from_chan->nodeq.q_first;
3274 						from_chan->nodeq.q_cnt--;
3275 					}
3276 					/* Clear node */
3277 					nlp->nlp_next[fchanno] = NULL;
3278 				} else {
3279 					count--;
3280 					do {
3281 						n_next =
3282 						    n_prev->nlp_next[fchanno];
3283 						if (n_next == nlp) {
3284 							break;
3285 						}
3286 						n_prev = n_next;
3287 					} while (count--);
3288 
3289 					if (count != 0) {
3290 
3291 						if (n_next ==
3292 						    (NODELIST *)from_chan->
3293 						    nodeq.q_last) {
3294 							n_prev->
3295 							    nlp_next[fchanno]
3296 							    =
3297 							    ((NODELIST *)
3298 							    from_chan->
3299 							    nodeq.q_last)->
3300 							    nlp_next
3301 							    [fchanno];
3302 							from_chan->nodeq.q_last
3303 							    = (uint8_t *)n_prev;
3304 						} else {
3305 
3306 							n_prev->
3307 							    nlp_next[fchanno]
3308 							    =
3309 							    n_next-> nlp_next
3310 							    [fchanno];
3311 						}
3312 						from_chan->nodeq.q_cnt--;
3313 						/* Clear node */
3314 						nlp->nlp_next[fchanno] =
3315 						    NULL;
3316 					}
3317 				}
3318 			}
3319 		}
3320 	}
3321 
3322 	/* Now cleanup the iocb's */
3323 	prev = NULL;
3324 	iocbq = (IOCBQ *)tbm.q_first;
3325 
3326 	while (iocbq) {
3327 
3328 		next = (IOCBQ *)iocbq->next;
3329 
3330 		/* Free the IoTag and the bmp */
3331 		iocb = &iocbq->iocb;
3332 
3333 		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3334 			sbp = iocbq->sbp;
3335 			if (sbp) {
3336 				hba->fc_table[sbp->iotag] = NULL;
3337 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3338 			}
3339 		} else {
3340 			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3341 			    iocb->ULPIOTAG, 0);
3342 		}
3343 
3344 		if (sbp && (sbp != STALE_PACKET)) {
3345 			mutex_enter(&sbp->mtx);
3346 			sbp->pkt_flags |= PACKET_IN_FLUSH;
3347 
3348 			/*
3349 			 * If the fpkt is already set, then we will leave it
3350 			 * alone. This ensures that this pkt is only accounted
3351 			 * for on one fpkt->flush_count
3352 			 */
3353 			if (!sbp->fpkt && fpkt) {
3354 				mutex_enter(&fpkt->mtx);
3355 				sbp->fpkt = fpkt;
3356 				fpkt->flush_count++;
3357 				mutex_exit(&fpkt->mtx);
3358 			}
3359 			mutex_exit(&sbp->mtx);
3360 		}
3361 		iocbq = next;
3362 
3363 	}	/* end of while */
3364 
3365 	iocbq = (IOCBQ *)tbm.q_first;
3366 	while (iocbq) {
3367 		/* Save the next iocbq for now */
3368 		next = (IOCBQ *)iocbq->next;
3369 
3370 		/* Unlink this iocbq */
3371 		iocbq->next = NULL;
3372 
3373 		/* Get the pkt */
3374 		sbp = (emlxs_buf_t *)iocbq->sbp;
3375 
3376 		if (sbp) {
3377 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3378 			"tx: sbp=%p node=%p", sbp, sbp->node);
3379 
3380 			if (hba->state >= FC_LINK_UP) {
3381 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3382 				    IOERR_ABORT_REQUESTED, 1);
3383 			} else {
3384 				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3385 				    IOERR_LINK_DOWN, 1);
3386 			}
3387 
3388 		}
3389 		/* Free the iocb and its associated buffers */
3390 		else {
3391 			icmd = &iocbq->iocb;
3392 
3393 			/* SLI3 */
3394 			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3395 			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3396 			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3397 				if ((hba->flag &
3398 				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3399 					/* HBA is detaching or offlining */
3400 					if (icmd->ULPCOMMAND !=
3401 					    CMD_QUE_RING_LIST64_CN) {
3402 						uint8_t *tmp;
3403 						RING *rp;
3404 						int ch;
3405 
3406 						ch = from_chan->channelno;
3407 						rp = &hba->sli.sli3.ring[ch];
3408 
3409 						for (i = 0;
3410 						    i < icmd->ULPBDECOUNT;
3411 						    i++) {
3412 							mp = EMLXS_GET_VADDR(
3413 							    hba, rp, icmd);
3414 
3415 							tmp = (uint8_t *)mp;
3416 							if (mp) {
3417 							(void) emlxs_mem_put(
3418 							    hba,
3419 							    MEM_BUF,
3420 							    tmp);
3421 							}
3422 						}
3423 
3424 					}
3425 
3426 					(void) emlxs_mem_put(hba, MEM_IOCB,
3427 					    (uint8_t *)iocbq);
3428 				} else {
3429 					/* repost the unsolicited buffer */
3430 					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3431 					    from_chan, iocbq);
3432 				}
3433 			}
3434 		}
3435 
3436 		iocbq = next;
3437 
3438 	}	/* end of while */
3439 
3440 	/* Now flush the chipq if any */
3441 	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3442 
3443 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3444 
3445 		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3446 
3447 		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3448 	}
3449 
3450 	if (lock) {
3451 		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3452 	}
3453 
3454 	return;
3455 
3456 } /* emlxs_tx_move */
3457 
3458 
3459 extern uint32_t
3460 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3461     emlxs_buf_t *fpkt)
3462 {
3463 	emlxs_hba_t *hba = HBA;
3464 	emlxs_buf_t *sbp;
3465 	IOCBQ *iocbq;
3466 	IOCBQ *next;
3467 	Q abort;
3468 	CHANNEL *cp;
3469 	uint32_t channelno;
3470 	uint8_t flag[MAX_CHANNEL];
3471 	uint32_t iotag;
3472 
3473 	bzero((void *)&abort, sizeof (Q));
3474 	bzero((void *)flag, sizeof (flag));
3475 
3476 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3477 		cp = &hba->chan[channelno];
3478 
3479 		if (chan && cp != chan) {
3480 			continue;
3481 		}
3482 
3483 		mutex_enter(&EMLXS_FCTAB_LOCK);
3484 
3485 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3486 			sbp = hba->fc_table[iotag];
3487 
3488 			if (sbp && (sbp != STALE_PACKET) &&
3489 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3490 			    (sbp->node == ndlp) &&
3491 			    (sbp->channel == cp) &&
3492 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3493 				emlxs_sbp_abort_add(port, sbp, &abort, flag,
3494 				    fpkt);
3495 			}
3496 
3497 		}
3498 		mutex_exit(&EMLXS_FCTAB_LOCK);
3499 
3500 	}	/* for */
3501 
3502 	/* Now put the iocb's on the tx queue */
3503 	iocbq = (IOCBQ *)abort.q_first;
3504 	while (iocbq) {
3505 		/* Save the next iocbq for now */
3506 		next = (IOCBQ *)iocbq->next;
3507 
3508 		/* Unlink this iocbq */
3509 		iocbq->next = NULL;
3510 
3511 		/* Send this iocbq */
3512 		emlxs_tx_put(iocbq, 1);
3513 
3514 		iocbq = next;
3515 	}
3516 
3517 	/* Now trigger channel service */
3518 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3519 		if (!flag[channelno]) {
3520 			continue;
3521 		}
3522 
3523 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3524 	}
3525 
3526 	return (abort.q_cnt);
3527 
3528 } /* emlxs_chipq_node_flush() */
3529 
3530 
3531 /* Flush all IO's left on all iotag lists */
3532 extern uint32_t
3533 emlxs_iotag_flush(emlxs_hba_t *hba)
3534 {
3535 	emlxs_port_t *port = &PPORT;
3536 	emlxs_buf_t *sbp;
3537 	IOCBQ *iocbq;
3538 	IOCB *iocb;
3539 	Q abort;
3540 	CHANNEL *cp;
3541 	uint32_t channelno;
3542 	uint32_t iotag;
3543 	uint32_t count;
3544 
3545 	count = 0;
3546 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3547 		cp = &hba->chan[channelno];
3548 
3549 		bzero((void *)&abort, sizeof (Q));
3550 
3551 		mutex_enter(&EMLXS_FCTAB_LOCK);
3552 
3553 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3554 			sbp = hba->fc_table[iotag];
3555 
3556 			/* Check if the slot is empty */
3557 			if (!sbp || (sbp == STALE_PACKET)) {
3558 				continue;
3559 			}
3560 
3561 			/* We are building an abort list per channel */
3562 			if (sbp->channel != cp) {
3563 				continue;
3564 			}
3565 
3566 			/* Set IOCB status */
3567 			iocbq = &sbp->iocbq;
3568 			iocb = &iocbq->iocb;
3569 
3570 			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3571 			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3572 			iocb->ULPLE = 1;
3573 			iocbq->next = NULL;
3574 
3575 			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3576 				hba->fc_table[iotag] = NULL;
3577 				emlxs_sli4_free_xri(hba, sbp, sbp->xp);
3578 			} else {
3579 				hba->fc_table[iotag] = STALE_PACKET;
3580 				hba->io_count --;
3581 				sbp->iotag = 0;
3582 
3583 				/* Clean up the sbp */
3584 				mutex_enter(&sbp->mtx);
3585 
3586 				if (sbp->pkt_flags & PACKET_IN_TXQ) {
3587 					sbp->pkt_flags &= ~PACKET_IN_TXQ;
3588 					hba->channel_tx_count --;
3589 				}
3590 
3591 				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3592 					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3593 				}
3594 
3595 				if (sbp->bmp) {
3596 					(void) emlxs_mem_put(hba, MEM_BPL,
3597 					    (uint8_t *)sbp->bmp);
3598 					sbp->bmp = 0;
3599 				}
3600 
3601 				mutex_exit(&sbp->mtx);
3602 			}
3603 
3604 			/* At this point all nodes are assumed destroyed */
3605 			mutex_enter(&sbp->mtx);
3606 			sbp->node = 0;
3607 			mutex_exit(&sbp->mtx);
3608 
3609 			/* Add this iocb to our local abort Q */
3610 			if (abort.q_first) {
3611 				((IOCBQ *)abort.q_last)->next = iocbq;
3612 				abort.q_last = (uint8_t *)iocbq;
3613 				abort.q_cnt++;
3614 			} else {
3615 				abort.q_first = (uint8_t *)iocbq;
3616 				abort.q_last = (uint8_t *)iocbq;
3617 				abort.q_cnt = 1;
3618 			}
3619 		}
3620 
3621 		mutex_exit(&EMLXS_FCTAB_LOCK);
3622 
3623 		/* Trigger deferred completion */
3624 		if (abort.q_first) {
3625 			mutex_enter(&cp->rsp_lock);
3626 			if (cp->rsp_head == NULL) {
3627 				cp->rsp_head = (IOCBQ *)abort.q_first;
3628 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3629 			} else {
3630 				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3631 				cp->rsp_tail = (IOCBQ *)abort.q_last;
3632 			}
3633 			mutex_exit(&cp->rsp_lock);
3634 
3635 			emlxs_thread_trigger2(&cp->intr_thread,
3636 			    emlxs_proc_channel, cp);
3637 
3638 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
3639 			    "Forced iotag completion. channel=%d count=%d",
3640 			    channelno, abort.q_cnt);
3641 
3642 			count += abort.q_cnt;
3643 		}
3644 	}
3645 
3646 	return (count);
3647 
3648 } /* emlxs_iotag_flush() */
3649 
3650 
3651 
3652 /* Checks for IO's on all or a given channel for a given node */
3653 extern uint32_t
3654 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3655 {
3656 	emlxs_hba_t *hba = HBA;
3657 	emlxs_buf_t *sbp;
3658 	CHANNEL *cp;
3659 	uint32_t channelno;
3660 	uint32_t count;
3661 	uint32_t iotag;
3662 
3663 	count = 0;
3664 
3665 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3666 		cp = &hba->chan[channelno];
3667 
3668 		if (chan && cp != chan) {
3669 			continue;
3670 		}
3671 
3672 		mutex_enter(&EMLXS_FCTAB_LOCK);
3673 
3674 		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3675 			sbp = hba->fc_table[iotag];
3676 
3677 			if (sbp && (sbp != STALE_PACKET) &&
3678 			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3679 			    (sbp->node == ndlp) &&
3680 			    (sbp->channel == cp) &&
3681 			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3682 				count++;
3683 			}
3684 
3685 		}
3686 		mutex_exit(&EMLXS_FCTAB_LOCK);
3687 
3688 	}	/* for */
3689 
3690 	return (count);
3691 
3692 } /* emlxs_chipq_node_check() */
3693 
3694 
3695 
3696 /* Flush all IO's for a given node's lun (on any channel) */
3697 extern uint32_t
3698 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3699     uint32_t lun, emlxs_buf_t *fpkt)
3700 {
3701 	emlxs_hba_t *hba = HBA;
3702 	emlxs_buf_t *sbp;
3703 	IOCBQ *iocbq;
3704 	IOCBQ *next;
3705 	Q abort;
3706 	uint32_t iotag;
3707 	uint8_t flag[MAX_CHANNEL];
3708 	uint32_t channelno;
3709 
3710 	bzero((void *)flag, sizeof (flag));
3711 	bzero((void *)&abort, sizeof (Q));
3712 
3713 	mutex_enter(&EMLXS_FCTAB_LOCK);
3714 	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3715 		sbp = hba->fc_table[iotag];
3716 
3717 		if (sbp && (sbp != STALE_PACKET) &&
3718 		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
3719 		    sbp->node == ndlp &&
3720 		    sbp->lun == lun &&
3721 		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3722 			emlxs_sbp_abort_add(port, sbp,
3723 			    &abort, flag, fpkt);
3724 		}
3725 	}
3726 	mutex_exit(&EMLXS_FCTAB_LOCK);
3727 
3728 	/* Now put the iocb's on the tx queue */
3729 	iocbq = (IOCBQ *)abort.q_first;
3730 	while (iocbq) {
3731 		/* Save the next iocbq for now */
3732 		next = (IOCBQ *)iocbq->next;
3733 
3734 		/* Unlink this iocbq */
3735 		iocbq->next = NULL;
3736 
3737 		/* Send this iocbq */
3738 		emlxs_tx_put(iocbq, 1);
3739 
3740 		iocbq = next;
3741 	}
3742 
3743 	/* Now trigger channel service */
3744 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
3745 		if (!flag[channelno]) {
3746 			continue;
3747 		}
3748 
3749 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3750 	}
3751 
3752 	return (abort.q_cnt);
3753 
3754 } /* emlxs_chipq_lun_flush() */
3755 
3756 
3757 
3758 /*
3759  * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
3760  * This must be called while holding the EMLXS_FCTAB_LOCK
3761  */
3762 extern IOCBQ *
3763 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3764     uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
3765 {
3766 	emlxs_hba_t *hba = HBA;
3767 	IOCBQ *iocbq;
3768 	IOCB *iocb;
3769 	emlxs_wqe_t *wqe;
3770 	emlxs_buf_t *sbp;
3771 	uint16_t abort_iotag;
3772 
3773 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3774 		return (NULL);
3775 	}
3776 
3777 	iocbq->channel = (void *)cp;
3778 	iocbq->port = (void *)port;
3779 	iocbq->node = (void *)ndlp;
3780 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3781 
3782 	/*
3783 	 * set up an iotag using special Abort iotags
3784 	 */
3785 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3786 		hba->fc_oor_iotag = hba->max_iotag;
3787 	}
3788 	abort_iotag = hba->fc_oor_iotag++;
3789 
3790 
3791 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3792 		wqe = &iocbq->wqe;
3793 		sbp = hba->fc_table[iotag];
3794 
3795 		/* Try to issue abort by XRI if possible */
3796 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3797 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3798 			wqe->AbortTag = iotag;
3799 		} else {
3800 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3801 			wqe->AbortTag = sbp->xp->XRI;
3802 		}
3803 		wqe->un.Abort.IA = 0;
3804 		wqe->RequestTag = abort_iotag;
3805 		wqe->Command = CMD_ABORT_XRI_CX;
3806 		wqe->Class = CLASS3;
3807 		wqe->CQId = 0x3ff;
3808 		wqe->CmdType = WQE_TYPE_ABORT;
3809 	} else {
3810 		iocb = &iocbq->iocb;
3811 		iocb->ULPIOTAG = abort_iotag;
3812 		iocb->un.acxri.abortType = flag;
3813 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3814 		iocb->un.acxri.abortIoTag = iotag;
3815 		iocb->ULPLE = 1;
3816 		iocb->ULPCLASS = class;
3817 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
3818 		iocb->ULPOWNER = OWN_CHIP;
3819 	}
3820 
3821 	return (iocbq);
3822 
3823 } /* emlxs_create_abort_xri_cn() */
3824 
3825 
3826 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3827 extern IOCBQ *
3828 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3829     CHANNEL *cp, uint8_t class, int32_t flag)
3830 {
3831 	emlxs_hba_t *hba = HBA;
3832 	IOCBQ *iocbq;
3833 	IOCB *iocb;
3834 	emlxs_wqe_t *wqe;
3835 	uint16_t abort_iotag;
3836 
3837 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3838 		return (NULL);
3839 	}
3840 
3841 	iocbq->channel = (void *)cp;
3842 	iocbq->port = (void *)port;
3843 	iocbq->node = (void *)ndlp;
3844 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3845 
3846 	/*
3847 	 * set up an iotag using special Abort iotags
3848 	 */
3849 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3850 		hba->fc_oor_iotag = hba->max_iotag;
3851 	}
3852 	abort_iotag = hba->fc_oor_iotag++;
3853 
3854 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3855 		wqe = &iocbq->wqe;
3856 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3857 		wqe->un.Abort.IA = 0;
3858 		wqe->RequestTag = abort_iotag;
3859 		wqe->AbortTag = xid;
3860 		wqe->Command = CMD_ABORT_XRI_CX;
3861 		wqe->Class = CLASS3;
3862 		wqe->CQId = 0x3ff;
3863 		wqe->CmdType = WQE_TYPE_ABORT;
3864 	} else {
3865 		iocb = &iocbq->iocb;
3866 		iocb->ULPCONTEXT = xid;
3867 		iocb->ULPIOTAG = abort_iotag;
3868 		iocb->un.acxri.abortType = flag;
3869 		iocb->ULPLE = 1;
3870 		iocb->ULPCLASS = class;
3871 		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3872 		iocb->ULPOWNER = OWN_CHIP;
3873 	}
3874 
3875 	return (iocbq);
3876 
3877 } /* emlxs_create_abort_xri_cx() */
3878 
3879 
3880 
3881 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3882 extern IOCBQ *
3883 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
3884     uint16_t iotag, CHANNEL *cp)
3885 {
3886 	emlxs_hba_t *hba = HBA;
3887 	IOCBQ *iocbq;
3888 	IOCB *iocb;
3889 	emlxs_wqe_t *wqe;
3890 	emlxs_buf_t *sbp;
3891 	uint16_t abort_iotag;
3892 
3893 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3894 		return (NULL);
3895 	}
3896 
3897 	iocbq->channel = (void *)cp;
3898 	iocbq->port = (void *)port;
3899 	iocbq->node = (void *)ndlp;
3900 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3901 
3902 	/*
3903 	 * set up an iotag using special Abort iotags
3904 	 */
3905 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3906 		hba->fc_oor_iotag = hba->max_iotag;
3907 	}
3908 	abort_iotag = hba->fc_oor_iotag++;
3909 
3910 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3911 		wqe = &iocbq->wqe;
3912 		sbp = hba->fc_table[iotag];
3913 
3914 		/* Try to issue close by XRI if possible */
3915 		if (sbp == NULL || sbp == STALE_PACKET || sbp->xp == NULL) {
3916 			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
3917 			wqe->AbortTag = iotag;
3918 		} else {
3919 			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3920 			wqe->AbortTag = sbp->xp->XRI;
3921 		}
3922 		wqe->un.Abort.IA = 1;
3923 		wqe->RequestTag = abort_iotag;
3924 		wqe->Command = CMD_ABORT_XRI_CX;
3925 		wqe->Class = CLASS3;
3926 		wqe->CQId = 0x3ff;
3927 		wqe->CmdType = WQE_TYPE_ABORT;
3928 	} else {
3929 		iocb = &iocbq->iocb;
3930 		iocb->ULPIOTAG = abort_iotag;
3931 		iocb->un.acxri.abortType = 0;
3932 		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
3933 		iocb->un.acxri.abortIoTag = iotag;
3934 		iocb->ULPLE = 1;
3935 		iocb->ULPCLASS = 0;
3936 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
3937 		iocb->ULPOWNER = OWN_CHIP;
3938 	}
3939 
3940 	return (iocbq);
3941 
3942 } /* emlxs_create_close_xri_cn() */
3943 
3944 
3945 /* This must be called while holding the EMLXS_FCTAB_LOCK */
3946 extern IOCBQ *
3947 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
3948     CHANNEL *cp)
3949 {
3950 	emlxs_hba_t *hba = HBA;
3951 	IOCBQ *iocbq;
3952 	IOCB *iocb;
3953 	emlxs_wqe_t *wqe;
3954 	uint16_t abort_iotag;
3955 
3956 	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB, 0)) == NULL) {
3957 		return (NULL);
3958 	}
3959 
3960 	iocbq->channel = (void *)cp;
3961 	iocbq->port = (void *)port;
3962 	iocbq->node = (void *)ndlp;
3963 	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
3964 
3965 	/*
3966 	 * set up an iotag using special Abort iotags
3967 	 */
3968 	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
3969 		hba->fc_oor_iotag = hba->max_iotag;
3970 	}
3971 	abort_iotag = hba->fc_oor_iotag++;
3972 
3973 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3974 		wqe = &iocbq->wqe;
3975 		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
3976 		wqe->un.Abort.IA = 1;
3977 		wqe->RequestTag = abort_iotag;
3978 		wqe->AbortTag = xid;
3979 		wqe->Command = CMD_ABORT_XRI_CX;
3980 		wqe->Class = CLASS3;
3981 		wqe->CQId = 0x3ff;
3982 		wqe->CmdType = WQE_TYPE_ABORT;
3983 	} else {
3984 		iocb = &iocbq->iocb;
3985 		iocb->ULPCONTEXT = xid;
3986 		iocb->ULPIOTAG = abort_iotag;
3987 		iocb->ULPLE = 1;
3988 		iocb->ULPCLASS = 0;
3989 		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3990 		iocb->ULPOWNER = OWN_CHIP;
3991 	}
3992 
3993 	return (iocbq);
3994 
3995 } /* emlxs_create_close_xri_cx() */
3996 
3997 
3998 #ifdef SFCT_SUPPORT
3999 void
4000 emlxs_abort_fct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4001 {
4002 	CHANNEL *cp;
4003 	IOCBQ *iocbq;
4004 	IOCB *iocb;
4005 
4006 	if (rxid == 0 || rxid == 0xFFFF) {
4007 		return;
4008 	}
4009 
4010 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4011 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
4012 		    "Aborting FCT exchange: xid=%x", rxid);
4013 
4014 		if (emlxs_sli4_unreserve_xri(hba, rxid) == 0) {
4015 			/* We have no way to abort unsolicited exchanges */
4016 			/* that we have not responded to at this time */
4017 			/* So we will return for now */
4018 			return;
4019 		}
4020 	}
4021 
4022 	cp = &hba->chan[hba->channel_fcp];
4023 
4024 	mutex_enter(&EMLXS_FCTAB_LOCK);
4025 
4026 	/* Create the abort IOCB */
4027 	if (hba->state >= FC_LINK_UP) {
4028 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4029 		    CLASS3, ABORT_TYPE_ABTS);
4030 	} else {
4031 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4032 	}
4033 
4034 	mutex_exit(&EMLXS_FCTAB_LOCK);
4035 
4036 	if (iocbq) {
4037 		iocb = &iocbq->iocb;
4038 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
4039 		    "Aborting FCT exchange: xid=%x iotag=%x", rxid,
4040 		    iocb->ULPIOTAG);
4041 
4042 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4043 	}
4044 
4045 } /* emlxs_abort_fct_exchange() */
4046 #endif /* SFCT_SUPPORT */
4047 
4048 
4049 void
4050 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4051 {
4052 	CHANNEL *cp;
4053 	IOCBQ *iocbq;
4054 	IOCB *iocb;
4055 
4056 	if (rxid == 0 || rxid == 0xFFFF) {
4057 		return;
4058 	}
4059 
4060 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4061 
4062 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4063 		    "Aborting ELS exchange: xid=%x", rxid);
4064 
4065 		if (emlxs_sli4_unreserve_xri(hba, rxid) == 0) {
4066 			/* We have no way to abort unsolicited exchanges */
4067 			/* that we have not responded to at this time */
4068 			/* So we will return for now */
4069 			return;
4070 		}
4071 	}
4072 
4073 	cp = &hba->chan[hba->channel_els];
4074 
4075 	mutex_enter(&EMLXS_FCTAB_LOCK);
4076 
4077 	/* Create the abort IOCB */
4078 	if (hba->state >= FC_LINK_UP) {
4079 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4080 		    CLASS3, ABORT_TYPE_ABTS);
4081 	} else {
4082 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4083 	}
4084 
4085 	mutex_exit(&EMLXS_FCTAB_LOCK);
4086 
4087 	if (iocbq) {
4088 		iocb = &iocbq->iocb;
4089 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4090 		    "Aborting ELS exchange: xid=%x iotag=%x", rxid,
4091 		    iocb->ULPIOTAG);
4092 
4093 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4094 	}
4095 
4096 } /* emlxs_abort_els_exchange() */
4097 
4098 
4099 void
4100 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4101 {
4102 	CHANNEL *cp;
4103 	IOCBQ *iocbq;
4104 	IOCB *iocb;
4105 
4106 	if (rxid == 0 || rxid == 0xFFFF) {
4107 		return;
4108 	}
4109 
4110 	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4111 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4112 		    "Aborting CT exchange: xid=%x", rxid);
4113 
4114 		if (emlxs_sli4_unreserve_xri(hba, rxid) == 0) {
4115 			/* We have no way to abort unsolicited exchanges */
4116 			/* that we have not responded to at this time */
4117 			/* So we will return for now */
4118 			return;
4119 		}
4120 	}
4121 
4122 	cp = &hba->chan[hba->channel_ct];
4123 
4124 	mutex_enter(&EMLXS_FCTAB_LOCK);
4125 
4126 	/* Create the abort IOCB */
4127 	if (hba->state >= FC_LINK_UP) {
4128 		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4129 		    CLASS3, ABORT_TYPE_ABTS);
4130 	} else {
4131 		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4132 	}
4133 
4134 	mutex_exit(&EMLXS_FCTAB_LOCK);
4135 
4136 	if (iocbq) {
4137 		iocb = &iocbq->iocb;
4138 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4139 		    "Aborting CT exchange: xid=%x iotag=%x", rxid,
4140 		    iocb->ULPIOTAG);
4141 
4142 		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4143 	}
4144 
4145 } /* emlxs_abort_ct_exchange() */
4146 
4147 
4148 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4149 static void
4150 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4151     uint8_t *flag, emlxs_buf_t *fpkt)
4152 {
4153 	emlxs_hba_t *hba = HBA;
4154 	IOCBQ *iocbq;
4155 	CHANNEL *cp;
4156 	NODELIST *ndlp;
4157 
4158 	cp = (CHANNEL *)sbp->channel;
4159 	ndlp = sbp->node;
4160 
4161 	/* Create the close XRI IOCB */
4162 	iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4163 
4164 	/*
4165 	 * Add this iocb to our local abort Q
4166 	 * This way we don't hold the CHIPQ lock too long
4167 	 */
4168 	if (iocbq) {
4169 		if (abort->q_first) {
4170 			((IOCBQ *)abort->q_last)->next = iocbq;
4171 			abort->q_last = (uint8_t *)iocbq;
4172 			abort->q_cnt++;
4173 		} else {
4174 			abort->q_first = (uint8_t *)iocbq;
4175 			abort->q_last = (uint8_t *)iocbq;
4176 			abort->q_cnt = 1;
4177 		}
4178 		iocbq->next = NULL;
4179 	}
4180 
4181 	/* set the flags */
4182 	mutex_enter(&sbp->mtx);
4183 
4184 	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4185 
4186 	sbp->ticks = hba->timer_tics + 10;
4187 	sbp->abort_attempts++;
4188 
4189 	flag[cp->channelno] = 1;
4190 
4191 	/*
4192 	 * If the fpkt is already set, then we will leave it alone
4193 	 * This ensures that this pkt is only accounted for on one
4194 	 * fpkt->flush_count
4195 	 */
4196 	if (!sbp->fpkt && fpkt) {
4197 		mutex_enter(&fpkt->mtx);
4198 		sbp->fpkt = fpkt;
4199 		fpkt->flush_count++;
4200 		mutex_exit(&fpkt->mtx);
4201 	}
4202 
4203 	mutex_exit(&sbp->mtx);
4204 
4205 	return;
4206 
4207 }	/* emlxs_sbp_abort_add() */
4208